repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
deepmind/pysc2
pysc2/lib/features.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/features.py#L774-L832
def observation_spec(self): """The observation spec for the SC2 environment. It's worth noting that the image-like observations are in y,x/row,column order which is different than the actions which are in x,y order. This is due to conflicting conventions, and to facilitate printing of the images. Returns: The dict of observation names to their tensor shapes. Shapes with a 0 can vary in length, for example the number of valid actions depends on which units you have selected. """ obs_spec = named_array.NamedDict({ "action_result": (0,), # See error.proto: ActionResult. "alerts": (0,), # See sc2api.proto: Alert. "available_actions": (0,), "build_queue": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "cargo": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "cargo_slots_available": (1,), "control_groups": (10, 2), "game_loop": (1,), "last_actions": (0,), "multi_select": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "player": (len(Player),), # pytype: disable=wrong-arg-types "score_cumulative": (len(ScoreCumulative),), # pytype: disable=wrong-arg-types "score_by_category": (len(ScoreByCategory), len(ScoreCategories)), # pytype: disable=wrong-arg-types "score_by_vital": (len(ScoreByVital), len(ScoreVitals)), # pytype: disable=wrong-arg-types "single_select": (0, len(UnitLayer)), # Only (n, 7) for n in (0, 1). 
# pytype: disable=wrong-arg-types }) aif = self._agent_interface_format if aif.feature_dimensions: obs_spec["feature_screen"] = (len(SCREEN_FEATURES), aif.feature_dimensions.screen.y, aif.feature_dimensions.screen.x) obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES), aif.feature_dimensions.minimap.y, aif.feature_dimensions.minimap.x) if aif.rgb_dimensions: obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y, aif.rgb_dimensions.screen.x, 3) obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y, aif.rgb_dimensions.minimap.x, 3) if aif.use_feature_units: obs_spec["feature_units"] = (0, len(FeatureUnit)) # pytype: disable=wrong-arg-types if aif.use_raw_units: obs_spec["raw_units"] = (0, len(FeatureUnit)) if aif.use_unit_counts: obs_spec["unit_counts"] = (0, len(UnitCounts)) if aif.use_camera_position: obs_spec["camera_position"] = (2,) return obs_spec
[ "def", "observation_spec", "(", "self", ")", ":", "obs_spec", "=", "named_array", ".", "NamedDict", "(", "{", "\"action_result\"", ":", "(", "0", ",", ")", ",", "# See error.proto: ActionResult.", "\"alerts\"", ":", "(", "0", ",", ")", ",", "# See sc2api.proto...
The observation spec for the SC2 environment. It's worth noting that the image-like observations are in y,x/row,column order which is different than the actions which are in x,y order. This is due to conflicting conventions, and to facilitate printing of the images. Returns: The dict of observation names to their tensor shapes. Shapes with a 0 can vary in length, for example the number of valid actions depends on which units you have selected.
[ "The", "observation", "spec", "for", "the", "SC2", "environment", "." ]
python
train
45.79661
leandroarndt/djangospam
djangospam/logger.py
https://github.com/leandroarndt/djangospam/blob/57fa9cfbf54a40f0e0652d0155dbb3451c14b69d/djangospam/logger.py#L46-L62
def log(ltype, method, page, user_agent): """Writes to the log a message in the following format:: "<datetime>: <exception> method <HTTP method> page <path> \ user agent <user_agent>" """ try: f = open(settings.DJANGOSPAM_LOG, "a") f.write("%s: %s method %s page %s user agent %s\n" % \ (datetime.datetime.now(), ltype, method, page, user_agent)) f.close() except: if settings.DJANGOSPAM_FAIL_ON_LOG: exc_type, exc_value = sys.exc_info()[:2] raise LogError(exc_type, exc_value)
[ "def", "log", "(", "ltype", ",", "method", ",", "page", ",", "user_agent", ")", ":", "try", ":", "f", "=", "open", "(", "settings", ".", "DJANGOSPAM_LOG", ",", "\"a\"", ")", "f", ".", "write", "(", "\"%s: %s method %s page %s user agent %s\\n\"", "%", "(",...
Writes to the log a message in the following format:: "<datetime>: <exception> method <HTTP method> page <path> \ user agent <user_agent>"
[ "Writes", "to", "the", "log", "a", "message", "in", "the", "following", "format", "::", "<datetime", ">", ":", "<exception", ">", "method", "<HTTP", "method", ">", "page", "<path", ">", "\\", "user", "agent", "<user_agent", ">" ]
python
train
34.117647
pudo/jsongraph
jsongraph/query.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/query.py#L132-L152
def query(self, parents=None): """ Compose the query and generate SPARQL. """ # TODO: benchmark single-query strategy q = Select([]) q = self.project(q, parent=True) q = self.filter(q, parents=parents) if self.parent is None: subq = Select([self.var]) subq = self.filter(subq, parents=parents) subq = subq.offset(self.node.offset) subq = subq.limit(self.node.limit) subq = subq.distinct() # TODO: sorting. subq = subq.order_by(desc(self.var)) q = q.where(subq) # if hasattr(self.context, 'identifier'): # q._where = graph(self.context.identifier, q._where) log.debug("Compiled query: %r", q.compile()) return q
[ "def", "query", "(", "self", ",", "parents", "=", "None", ")", ":", "# TODO: benchmark single-query strategy", "q", "=", "Select", "(", "[", "]", ")", "q", "=", "self", ".", "project", "(", "q", ",", "parent", "=", "True", ")", "q", "=", "self", ".",...
Compose the query and generate SPARQL.
[ "Compose", "the", "query", "and", "generate", "SPARQL", "." ]
python
train
36.761905
qualisys/qualisys_python_sdk
qtm/qrt.py
https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/qrt.py#L195-L202
async def release_control(self): """Release control of QTM. """ cmd = "releasecontrol" return await asyncio.wait_for( self._protocol.send_command(cmd), timeout=self._timeout )
[ "async", "def", "release_control", "(", "self", ")", ":", "cmd", "=", "\"releasecontrol\"", "return", "await", "asyncio", ".", "wait_for", "(", "self", ".", "_protocol", ".", "send_command", "(", "cmd", ")", ",", "timeout", "=", "self", ".", "_timeout", ")...
Release control of QTM.
[ "Release", "control", "of", "QTM", "." ]
python
valid
27.625
IGBC/PySketch
sketches/__init__.py
https://github.com/IGBC/PySketch/blob/3b39410a85693b46704e75739e70301cfea33523/sketches/__init__.py#L44-L65
def __register_library(self, module_name: str, attr: str, fallback: str = None): """Inserts Interpreter Library of imports into sketch in a very non-consensual way""" # Import the module Named in the string try: module = importlib.import_module(module_name) # If module is not found it checks if an alternative is is listed # If it is then it substitutes it, just so that the code can run except ImportError: if fallback is not None: module = importlib.import_module(fallback) self.__logger.warn(module_name + " not available: Replaced with " + fallback) else: self.__logger.warn(module_name + " not available: No Replacement Specified") # Cram the module into the __sketch in the form of module -> "attr" # AKA the same as `import module as attr` if not attr in dir(self.__sketch): setattr(self.__sketch, attr, module) else: self.__logger.warn(attr +" could not be imported as it's label is already used in the sketch")
[ "def", "__register_library", "(", "self", ",", "module_name", ":", "str", ",", "attr", ":", "str", ",", "fallback", ":", "str", "=", "None", ")", ":", "# Import the module Named in the string", "try", ":", "module", "=", "importlib", ".", "import_module", "(",...
Inserts Interpreter Library of imports into sketch in a very non-consensual way
[ "Inserts", "Interpreter", "Library", "of", "imports", "into", "sketch", "in", "a", "very", "non", "-", "consensual", "way" ]
python
valid
49.545455
hvac/hvac
hvac/api/system_backend/auth.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/auth.py#L96-L117
def read_auth_method_tuning(self, path): """Read the given auth path's configuration. This endpoint requires sudo capability on the final path, but the same functionality can be achieved without sudo via sys/mounts/auth/[auth-path]/tune. Supported methods: GET: /sys/auth/{path}/tune. Produces: 200 application/json :param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type" argument. :type path: str | unicode :return: The JSON response of the request. :rtype: dict """ api_path = '/v1/sys/auth/{path}/tune'.format( path=path, ) response = self._adapter.get( url=api_path, ) return response.json()
[ "def", "read_auth_method_tuning", "(", "self", ",", "path", ")", ":", "api_path", "=", "'/v1/sys/auth/{path}/tune'", ".", "format", "(", "path", "=", "path", ",", ")", "response", "=", "self", ".", "_adapter", ".", "get", "(", "url", "=", "api_path", ",", ...
Read the given auth path's configuration. This endpoint requires sudo capability on the final path, but the same functionality can be achieved without sudo via sys/mounts/auth/[auth-path]/tune. Supported methods: GET: /sys/auth/{path}/tune. Produces: 200 application/json :param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type" argument. :type path: str | unicode :return: The JSON response of the request. :rtype: dict
[ "Read", "the", "given", "auth", "path", "s", "configuration", "." ]
python
train
35.909091
gabstopper/smc-python
smc/elements/user.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/user.py#L120-L135
def permissions(self): """ Return each permission role mapping for this Admin User. A permission role will have 3 fields: * Domain * Role (Viewer, Operator, etc) * Elements (Engines, Policies, or ACLs) :return: permissions as list :rtype: list(Permission) """ if 'permissions' in self.data: _permissions = self.data['permissions']['permission'] return [Permission(**perm) for perm in _permissions] return []
[ "def", "permissions", "(", "self", ")", ":", "if", "'permissions'", "in", "self", ".", "data", ":", "_permissions", "=", "self", ".", "data", "[", "'permissions'", "]", "[", "'permission'", "]", "return", "[", "Permission", "(", "*", "*", "perm", ")", ...
Return each permission role mapping for this Admin User. A permission role will have 3 fields: * Domain * Role (Viewer, Operator, etc) * Elements (Engines, Policies, or ACLs) :return: permissions as list :rtype: list(Permission)
[ "Return", "each", "permission", "role", "mapping", "for", "this", "Admin", "User", ".", "A", "permission", "role", "will", "have", "3", "fields", ":", "*", "Domain", "*", "Role", "(", "Viewer", "Operator", "etc", ")", "*", "Elements", "(", "Engines", "Po...
python
train
32.625
MisterY/gnucash-portfolio
gnucash_portfolio/lib/templates.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/lib/templates.py#L7-L23
def load_jinja_template(file_name): """ Loads the jinja2 HTML template from the given file. Assumes that the file is in the same directory as the script. """ original_script_path = sys.argv[0] #script_path = os.path.dirname(os.path.realpath(__file__)) script_dir = os.path.dirname(original_script_path) # file_path = os.path.join(script_path, file_name) # with open(file_path, 'r') as template_file: # return template_file.read() from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(script_dir)) template = env.get_template(file_name) return template
[ "def", "load_jinja_template", "(", "file_name", ")", ":", "original_script_path", "=", "sys", ".", "argv", "[", "0", "]", "#script_path = os.path.dirname(os.path.realpath(__file__))", "script_dir", "=", "os", ".", "path", ".", "dirname", "(", "original_script_path", "...
Loads the jinja2 HTML template from the given file. Assumes that the file is in the same directory as the script.
[ "Loads", "the", "jinja2", "HTML", "template", "from", "the", "given", "file", ".", "Assumes", "that", "the", "file", "is", "in", "the", "same", "directory", "as", "the", "script", "." ]
python
train
37.294118
peterbrittain/asciimatics
asciimatics/screen.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/screen.py#L59-L70
def clear(self, fg, attr, bg): """ Clear the double-buffer. This does not clear the screen buffer and so the next call to deltas will still show all changes. :param fg: The foreground colour to use for the new buffer. :param attr: The attribute value to use for the new buffer. :param bg: The background colour to use for the new buffer. """ line = [(ord(u" "), fg, attr, bg, 1) for _ in range(self._width)] self._double_buffer = [line[:] for _ in range(self._height)]
[ "def", "clear", "(", "self", ",", "fg", ",", "attr", ",", "bg", ")", ":", "line", "=", "[", "(", "ord", "(", "u\" \"", ")", ",", "fg", ",", "attr", ",", "bg", ",", "1", ")", "for", "_", "in", "range", "(", "self", ".", "_width", ")", "]", ...
Clear the double-buffer. This does not clear the screen buffer and so the next call to deltas will still show all changes. :param fg: The foreground colour to use for the new buffer. :param attr: The attribute value to use for the new buffer. :param bg: The background colour to use for the new buffer.
[ "Clear", "the", "double", "-", "buffer", "." ]
python
train
44.25
DataBiosphere/toil
src/toil/serviceManager.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/serviceManager.py#L121-L129
def killServices(self, services, error=False): """ :param dict services: Maps service jobStoreIDs to the communication flags for the service """ for serviceJobStoreID in services: serviceJob = services[serviceJobStoreID] if error: self.jobStore.deleteFile(serviceJob.errorJobStoreID) self.jobStore.deleteFile(serviceJob.terminateJobStoreID)
[ "def", "killServices", "(", "self", ",", "services", ",", "error", "=", "False", ")", ":", "for", "serviceJobStoreID", "in", "services", ":", "serviceJob", "=", "services", "[", "serviceJobStoreID", "]", "if", "error", ":", "self", ".", "jobStore", ".", "d...
:param dict services: Maps service jobStoreIDs to the communication flags for the service
[ ":", "param", "dict", "services", ":", "Maps", "service", "jobStoreIDs", "to", "the", "communication", "flags", "for", "the", "service" ]
python
train
46.222222
BlueBrain/NeuroM
examples/features_graph_table.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/features_graph_table.py#L94-L106
def plot_feature(feature, cell): '''Plot a feature ''' fig = pl.figure() ax = fig.add_subplot(111) if cell is not None: try: histogram(cell, feature, ax) except ValueError: pass stylize(ax, cell.name, feature) return fig
[ "def", "plot_feature", "(", "feature", ",", "cell", ")", ":", "fig", "=", "pl", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "if", "cell", "is", "not", "None", ":", "try", ":", "histogram", "(", "cell", ",", "fe...
Plot a feature
[ "Plot", "a", "feature" ]
python
train
21.615385
tgalal/python-axolotl
axolotl/groups/groupcipher.py
https://github.com/tgalal/python-axolotl/blob/0c681af4b756f556e23a9bf961abfbc6f82800cc/axolotl/groups/groupcipher.py#L110-L117
def getCipherText(self, iv, key, plaintext): """ :type iv: bytearray :type key: bytearray :type plaintext: bytearray """ cipher = AESCipher(key, iv) return cipher.encrypt(bytes(plaintext))
[ "def", "getCipherText", "(", "self", ",", "iv", ",", "key", ",", "plaintext", ")", ":", "cipher", "=", "AESCipher", "(", "key", ",", "iv", ")", "return", "cipher", ".", "encrypt", "(", "bytes", "(", "plaintext", ")", ")" ]
:type iv: bytearray :type key: bytearray :type plaintext: bytearray
[ ":", "type", "iv", ":", "bytearray", ":", "type", "key", ":", "bytearray", ":", "type", "plaintext", ":", "bytearray" ]
python
train
29.625
mollie/mollie-api-python
mollie/api/objects/list.py
https://github.com/mollie/mollie-api-python/blob/307836b70f0439c066718f1e375fa333dc6e5d77/mollie/api/objects/list.py#L49-L54
def get_next(self): """Return the next set of objects in a list""" url = self._get_link('next') resource = self.object_type.get_resource_class(self.client) resp = resource.perform_api_call(resource.REST_READ, url) return List(resp, self.object_type, self.client)
[ "def", "get_next", "(", "self", ")", ":", "url", "=", "self", ".", "_get_link", "(", "'next'", ")", "resource", "=", "self", ".", "object_type", ".", "get_resource_class", "(", "self", ".", "client", ")", "resp", "=", "resource", ".", "perform_api_call", ...
Return the next set of objects in a list
[ "Return", "the", "next", "set", "of", "objects", "in", "a", "list" ]
python
train
49.5
deep-compute/deeputil
deeputil/misc.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L111-L132
def parse_location(loc, default_port): ''' loc can be of the format http://<ip/domain>[:<port>] eg: http://localhost:8888 http://localhost/ return ip (str), port (int) >>> parse_location('http://localhost/', 6379) ('localhost', 6379) >>> parse_location('http://localhost:8888', 6379) ('localhost', 8888) ''' parsed = urlparse(loc) if ':' in parsed.netloc: ip, port = parsed.netloc.split(':') port = int(port) else: ip, port = parsed.netloc, default_port return ip, port
[ "def", "parse_location", "(", "loc", ",", "default_port", ")", ":", "parsed", "=", "urlparse", "(", "loc", ")", "if", "':'", "in", "parsed", ".", "netloc", ":", "ip", ",", "port", "=", "parsed", ".", "netloc", ".", "split", "(", "':'", ")", "port", ...
loc can be of the format http://<ip/domain>[:<port>] eg: http://localhost:8888 http://localhost/ return ip (str), port (int) >>> parse_location('http://localhost/', 6379) ('localhost', 6379) >>> parse_location('http://localhost:8888', 6379) ('localhost', 8888)
[ "loc", "can", "be", "of", "the", "format", "http", ":", "//", "<ip", "/", "domain", ">", "[", ":", "<port", ">", "]", "eg", ":", "http", ":", "//", "localhost", ":", "8888", "http", ":", "//", "localhost", "/", "return", "ip", "(", "str", ")", ...
python
train
24.681818
google/grr
grr/server/grr_response_server/rdfvalues/objects.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/rdfvalues/objects.py#L106-L115
def GetIPAddresses(self): """IP addresses from all interfaces.""" result = [] filtered_ips = ["127.0.0.1", "::1", "fe80::1"] for interface in self.interfaces: for address in interface.addresses: if address.human_readable_address not in filtered_ips: result.append(Text(address.human_readable_address)) return sorted(result)
[ "def", "GetIPAddresses", "(", "self", ")", ":", "result", "=", "[", "]", "filtered_ips", "=", "[", "\"127.0.0.1\"", ",", "\"::1\"", ",", "\"fe80::1\"", "]", "for", "interface", "in", "self", ".", "interfaces", ":", "for", "address", "in", "interface", ".",...
IP addresses from all interfaces.
[ "IP", "addresses", "from", "all", "interfaces", "." ]
python
train
35.9
opendatateam/udata
udata/commands/__init__.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/__init__.py#L207-L238
def load_udata_commands(self, ctx): ''' Load udata commands from: - `udata.commands.*` module - known internal modules with commands - plugins exporting a `udata.commands` entrypoint ''' if self._udata_commands_loaded: return # Load all commands submodules pattern = os.path.join(os.path.dirname(__file__), '[!_]*.py') for filename in iglob(pattern): module = os.path.splitext(os.path.basename(filename))[0] try: __import__('udata.commands.{0}'.format(module)) except Exception as e: error('Unable to import {0}'.format(module), e) # Load all core modules commands for module in MODULES_WITH_COMMANDS: try: __import__('udata.{0}.commands'.format(module)) except Exception as e: error('Unable to import {0}'.format(module), e) # Load commands from entry points for enabled plugins app = ctx.ensure_object(ScriptInfo).load_app() entrypoints.get_enabled('udata.commands', app) # Ensure loading happens once self._udata_commands_loaded = False
[ "def", "load_udata_commands", "(", "self", ",", "ctx", ")", ":", "if", "self", ".", "_udata_commands_loaded", ":", "return", "# Load all commands submodules", "pattern", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__fil...
Load udata commands from: - `udata.commands.*` module - known internal modules with commands - plugins exporting a `udata.commands` entrypoint
[ "Load", "udata", "commands", "from", ":", "-", "udata", ".", "commands", ".", "*", "module", "-", "known", "internal", "modules", "with", "commands", "-", "plugins", "exporting", "a", "udata", ".", "commands", "entrypoint" ]
python
train
37
joferkington/mplstereonet
mplstereonet/utilities.py
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L223-L249
def parse_azimuth(azimuth): """ Parses an azimuth measurement in azimuth or quadrant format. Parameters ----------- azimuth : string or number An azimuth measurement in degrees or a quadrant measurement of azimuth. Returns ------- azi : float The azimuth in degrees clockwise from north (range: 0-360) See Also -------- parse_quadrant_measurement parse_strike_dip parse_plunge_bearing """ try: azimuth = float(azimuth) except ValueError: if not azimuth[0].isalpha(): raise ValueError('Ambiguous azimuth: {}'.format(azimuth)) azimuth = parse_quadrant_measurement(azimuth) return azimuth
[ "def", "parse_azimuth", "(", "azimuth", ")", ":", "try", ":", "azimuth", "=", "float", "(", "azimuth", ")", "except", "ValueError", ":", "if", "not", "azimuth", "[", "0", "]", ".", "isalpha", "(", ")", ":", "raise", "ValueError", "(", "'Ambiguous azimuth...
Parses an azimuth measurement in azimuth or quadrant format. Parameters ----------- azimuth : string or number An azimuth measurement in degrees or a quadrant measurement of azimuth. Returns ------- azi : float The azimuth in degrees clockwise from north (range: 0-360) See Also -------- parse_quadrant_measurement parse_strike_dip parse_plunge_bearing
[ "Parses", "an", "azimuth", "measurement", "in", "azimuth", "or", "quadrant", "format", "." ]
python
train
25.259259
saltstack/salt
salt/cli/daemons.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L534-L568
def prepare(self): ''' Run the preparation sequence required to start a salt syndic minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Syndic, self).prepare() try: if self.config['verify_env']: verify_env( [ self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], ], self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], pki_dir=self.config['pki_dir'], ) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up "{0}"'.format(self.config['id'])) # Late import so logging works correctly import salt.minion self.daemonize_if_required() self.syndic = salt.minion.SyndicManager(self.config) self.set_pidfile()
[ "def", "prepare", "(", "self", ")", ":", "super", "(", "Syndic", ",", "self", ")", ".", "prepare", "(", ")", "try", ":", "if", "self", ".", "config", "[", "'verify_env'", "]", ":", "verify_env", "(", "[", "self", ".", "config", "[", "'pki_dir'", "]...
Run the preparation sequence required to start a salt syndic minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare()
[ "Run", "the", "preparation", "sequence", "required", "to", "start", "a", "salt", "syndic", "minion", "." ]
python
train
34.742857
python-hyper/wsproto
example/synchronous_client.py
https://github.com/python-hyper/wsproto/blob/a7abcc5a9f7ad126668afb0cc9932da08c87f40f/example/synchronous_client.py#L33-L103
def wsproto_demo(host, port): ''' Demonstrate wsproto: 0) Open TCP connection 1) Negotiate WebSocket opening handshake 2) Send a message and display response 3) Send ping and display pong 4) Negotiate WebSocket closing handshake :param stream: a socket stream ''' # 0) Open TCP connection print('Connecting to {}:{}'.format(host, port)) conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn.connect((host, port)) # 1) Negotiate WebSocket opening handshake print('Opening WebSocket') ws = WSConnection(ConnectionType.CLIENT) net_send(ws.send(Request(host=host, target='server')), conn) net_recv(ws, conn) # events is a generator that yields websocket event objects. Usually you # would say `for event in ws.events()`, but the synchronous nature of this # client requires us to use next(event) instead so that we can interleave # the network I/O. It will raise StopIteration when it runs out of events # (i.e. needs more network data), but since this script is synchronous, we # will explicitly resume the generator whenever we have new network data. events = ws.events() # Because this is a client WebSocket, wsproto has automatically queued up # a handshake, and we need to send it and wait for a response. 
event = next(events) if isinstance(event, AcceptConnection): print('WebSocket negotiation complete') else: raise Exception('Expected AcceptConnection event!') # 2) Send a message and display response message = "wsproto is great" print('Sending message: {}'.format(message)) net_send(ws.send(Message(data=message)), conn) net_recv(ws, conn) event = next(events) if isinstance(event, TextMessage): print('Received message: {}'.format(event.data)) else: raise Exception('Expected TextMessage event!') # 3) Send ping and display pong payload = b"table tennis" print('Sending ping: {}'.format(payload)) net_send(ws.send(Ping(payload=payload)), conn) net_recv(ws, conn) event = next(events) if isinstance(event, Pong): print('Received pong: {}'.format(event.payload)) else: raise Exception('Expected Pong event!') # 4) Negotiate WebSocket closing handshake print('Closing WebSocket') net_send(ws.send(CloseConnection(code=1000, reason='sample reason')), conn) # After sending the closing frame, we won't get any more events. The server # should send a reply and then close the connection, so we need to receive # twice: net_recv(ws, conn) conn.shutdown(socket.SHUT_WR) net_recv(ws, conn)
[ "def", "wsproto_demo", "(", "host", ",", "port", ")", ":", "# 0) Open TCP connection", "print", "(", "'Connecting to {}:{}'", ".", "format", "(", "host", ",", "port", ")", ")", "conn", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "sock...
Demonstrate wsproto: 0) Open TCP connection 1) Negotiate WebSocket opening handshake 2) Send a message and display response 3) Send ping and display pong 4) Negotiate WebSocket closing handshake :param stream: a socket stream
[ "Demonstrate", "wsproto", ":" ]
python
train
36.661972
pytroll/satpy
satpy/readers/aapp_l1b.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/aapp_l1b.py#L158-L191
def get_angles(self, angle_id): """Get sun-satellite viewing angles""" tic = datetime.now() sunz40km = self._data["ang"][:, :, 0] * 1e-2 satz40km = self._data["ang"][:, :, 1] * 1e-2 azidiff40km = self._data["ang"][:, :, 2] * 1e-2 try: from geotiepoints.interpolator import Interpolator except ImportError: logger.warning("Could not interpolate sun-sat angles, " "python-geotiepoints missing.") self.sunz, self.satz, self.azidiff = sunz40km, satz40km, azidiff40km else: cols40km = np.arange(24, 2048, 40) cols1km = np.arange(2048) lines = sunz40km.shape[0] rows40km = np.arange(lines) rows1km = np.arange(lines) along_track_order = 1 cross_track_order = 3 satint = Interpolator( [sunz40km, satz40km, azidiff40km], (rows40km, cols40km), (rows1km, cols1km), along_track_order, cross_track_order) self.sunz, self.satz, self.azidiff = satint.interpolate() logger.debug("Interpolate sun-sat angles: time %s", str(datetime.now() - tic)) return create_xarray(getattr(self, ANGLES[angle_id]))
[ "def", "get_angles", "(", "self", ",", "angle_id", ")", ":", "tic", "=", "datetime", ".", "now", "(", ")", "sunz40km", "=", "self", ".", "_data", "[", "\"ang\"", "]", "[", ":", ",", ":", ",", "0", "]", "*", "1e-2", "satz40km", "=", "self", ".", ...
Get sun-satellite viewing angles
[ "Get", "sun", "-", "satellite", "viewing", "angles" ]
python
train
37.323529
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L817-L834
def _press_pwr_btn(self, pushType="Press"): """Simulates a physical press of the server power button. :param pushType: Type of power button press to simulate Supported values are: 'Press' and 'PressAndHold' :raises: IloError, on an error from iLO. """ power_settings = {"Action": "PowerButton", "Target": "/Oem/Hp", "PushType": pushType} systems_uri = "/rest/v1/Systems/1" status, headers, response = self._rest_post(systems_uri, None, power_settings) if status >= 300: msg = self._get_extended_error(response) raise exception.IloError(msg)
[ "def", "_press_pwr_btn", "(", "self", ",", "pushType", "=", "\"Press\"", ")", ":", "power_settings", "=", "{", "\"Action\"", ":", "\"PowerButton\"", ",", "\"Target\"", ":", "\"/Oem/Hp\"", ",", "\"PushType\"", ":", "pushType", "}", "systems_uri", "=", "\"/rest/v1...
Simulates a physical press of the server power button. :param pushType: Type of power button press to simulate Supported values are: 'Press' and 'PressAndHold' :raises: IloError, on an error from iLO.
[ "Simulates", "a", "physical", "press", "of", "the", "server", "power", "button", "." ]
python
train
41.277778
hubo1016/vlcp
vlcp/utils/http.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/http.py#L690-L708
def route(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
    '''
    Register a WSGI-styled routine factory as the handler for a path

    :param path: path to match, can be a regular expression

    :param routinemethod: factory function routinemethod(env), env is an
           Environment object, see also utils.http.Environment

    :param container: routine container

    :param host: if specified, only respond to requests for this host

    :param vhost: if specified, only respond to requests on this vhost;
           when omitted, respond on the dispatcher default vhost

    :param method: if specified, only respond to these HTTP methods
    '''
    # Wrap the factory so it is served as a static HTTP handler, then
    # delegate the actual registration to routeevent().
    handler = statichttp(container)(routinemethod)
    self.routeevent(path, handler, container, host, vhost, method)
[ "def", "route", "(", "self", ",", "path", ",", "routinemethod", ",", "container", "=", "None", ",", "host", "=", "None", ",", "vhost", "=", "None", ",", "method", "=", "[", "b'GET'", ",", "b'HEAD'", "]", ")", ":", "self", ".", "routeevent", "(", "p...
Route specified path to a WSGI-styled routine factory :param path: path to match, can be a regular expression :param routinemethod: factory function routinemethod(env), env is an Environment object see also utils.http.Environment :param container: routine container :param host: if specified, only response to request to specified host :param vhost: if specified, only response to request to specified vhost. If not specified, response to dispatcher default vhost. :param method: if specified, response to specified methods
[ "Route", "specified", "path", "to", "a", "WSGI", "-", "styled", "routine", "factory", ":", "param", "path", ":", "path", "to", "match", "can", "be", "a", "regular", "expression", ":", "param", "routinemethod", ":", "factory", "function", "routinemethod", "("...
python
train
46.789474
LasLabs/python-five9
five9/environment.py
https://github.com/LasLabs/python-five9/blob/ef53160d6658604524a2577391280d2b4501a7ce/five9/environment.py#L12-L21
def model(method):
    """Use this to decorate methods that expect a model.

    The wrapped method raises ValidationError when invoked before a model
    has been selected (i.e. while ``self.__model__`` is None).
    """
    import functools

    # functools.wraps preserves the decorated method's __name__/__doc__,
    # which the original wrapper clobbered.
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.__model__ is None:
            raise ValidationError(
                'You cannot perform CRUD operations without selecting a '
                'model first.',
            )
        return method(self, *args, **kwargs)
    return wrapper
[ "def", "model", "(", "method", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "__model__", "is", "None", ":", "raise", "ValidationError", "(", "'You cannot perform CRUD operations without sele...
Use this to decorate methods that expect a model.
[ "Use", "this", "to", "decorate", "methods", "that", "expect", "a", "model", "." ]
python
train
39.9
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L433-L444
def set_meta_all(self, props):
    """Replace the collection metadata with ``props``.

    ``props`` is a dict of property values. Existing properties that are
    missing from ``props`` are explicitly set to None rather than being
    silently dropped.
    """
    # Start with every existing key nulled, then overlay the new values.
    merged = {key: None for key in self.get_meta()}
    merged.update(props)
    self.set_meta(merged)
[ "def", "set_meta_all", "(", "self", ",", "props", ")", ":", "delta_props", "=", "self", ".", "get_meta", "(", ")", "for", "key", "in", "delta_props", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "props", ":", "delta_props", "[", "key", "]", ...
Set metadata values for collection. ``props`` a dict with values for properties.
[ "Set", "metadata", "values", "for", "collection", "." ]
python
train
29.333333
bmweiner/skillful
skillful/validate.py
https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/validate.py#L123-L159
def cert_chain(certs):
    """Validate PEM-encoded X.509 certificate chain.

    See `validate.request` for additional info.

    Args:
        certs: list. The certificate chain as a list of
            cryptography.hazmat.backends.openssl.x509._Certificate
            certificates. See `validate.retrieve` to create certs obj.

    Returns:
        bool: True if valid, False otherwise.
    """
    if len(certs) < 2:
        # Message now matches the actual check (it previously said '< 3').
        warnings.warn('Certificate chain contains < 2 certificates.')
        return False

    cert = certs[0]
    today = datetime.datetime.today()
    if not today > cert.not_valid_before:
        warnings.warn('Certificate Not Before date is invalid.')
        return False
    if not today < cert.not_valid_after:
        warnings.warn('Certificate Not After date is invalid.')
        return False

    # The leaf certificate must name the Alexa endpoint in its SANs.
    oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
    ext = cert.extensions.get_extension_for_oid(oid_san)
    sans = ext.value.get_values_for_type(x509.DNSName)
    if 'echo-api.amazon.com' not in sans:
        warnings.warn('Certificate SANs do not include echo-api.amazon.com.')
        return False

    # Each certificate must be issued by the next one in the chain.
    for i in range(len(certs) - 1):
        if certs[i].issuer != certs[i + 1].subject:
            warnings.warn('Certificate chain has a broken issuer link.')
            return False
    return True
[ "def", "cert_chain", "(", "certs", ")", ":", "if", "len", "(", "certs", ")", "<", "2", ":", "warnings", ".", "warn", "(", "'Certificate chain contains < 3 certificates.'", ")", "return", "False", "cert", "=", "certs", "[", "0", "]", "today", "=", "datetime...
Validate PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: certs: list. The certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates. See `validate.retrieve` to create certs obj. Returns: bool: True if valid, False otherwise.
[ "Validate", "PEM", "-", "encoded", "X", ".", "509", "certificate", "chain", "." ]
python
train
31.351351
antevens/listen
listen/signal_handler.py
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L65-L92
def default_handler(self, signum, frame):
    """ Default handler, a generic callback method for signal processing"""
    self.log.debug("Signal handler called with signal: {0}".format(signum))

    # 1. If signal is HUP restart the python process
    # 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
    # 3. If signal is STOP or TSTP we pause
    # 4. If signal is CONT or USR1 we continue
    # 5. If signal is INFO we print status
    # 6. If signal is USR2 we abort and then exit with -1
    if signum in self.restart_signals:
        # Reset handlers to the no-op pseudo handler and clean up before
        # replacing this process image with a fresh interpreter.
        self.set_handler(self.handled_signals, self.pseudo_handler)
        self._cleanup()
        os.execl('python', 'python', *sys.argv)
    elif signum in self.abort_signals:
        self.abort(signum)
    elif signum in self.pause_signals:
        self.pause(signum)
    elif signum in self.resume_signals:
        self.resume(signum)
    elif signum in self.status_signals:
        self.status(signum)
    elif signum in self.error_signals:
        self.log.error('Signal handler received error signal from an external process, aborting')
        self.abort(signum)
    else:
        self.log.error("Unhandled signal received: {0}".format(signum))
        # NOTE(review): a bare `raise` with no active exception raises
        # RuntimeError("No active exception to re-raise") — confirm this is
        # the intended failure mode for unhandled signals.
        raise
[ "def", "default_handler", "(", "self", ",", "signum", ",", "frame", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Signal handler called with signal: {0}\"", ".", "format", "(", "signum", ")", ")", "# 1. If signal is HUP restart the python process", "# 2. If sign...
Default handler, a generic callback method for signal processing
[ "Default", "handler", "a", "generic", "callback", "method", "for", "signal", "processing" ]
python
test
46.535714
asyrjasalo/RESTinstance
src/REST/keywords.py
https://github.com/asyrjasalo/RESTinstance/blob/9b003ffc6a89ec4b8b6f05eeb6cc8e56aad4be4e/src/REST/keywords.py#L1053-L1130
def output_schema(
    self, what="", file_path=None, append=False, sort_keys=False
):
    """*Outputs JSON Schema to terminal or a file.*

    By default, the schema is output for the last request and response.

    The output can be limited further by:

    - The property of the last instance, e.g. ``request`` or ``response``
    - Any nested property that exists, similarly as for assertion keywords

    Also variables and values that can be converted to JSON are accepted,
    in which case the schema is generated for those instead.

    *Options*

    ``file_path``: The JSON Schema is written to a file instead of terminal.
    The file is created if it does not exist.

    ``append``: If true, the JSON Schema is appended to the given file
    instead of truncating it first.

    ``sort_keys``: If true, the JSON Schema is sorted alphabetically by
    property names before it is output.

    *Examples*

    | `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
    | `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
    | `Output Schema` | $.email | # only the schema for one response body property |
    | `Output Schema` | $..geo | # only the schema for the nested response body property |
    """
    # NOTE: the local name `json` deliberately holds the schema value here
    # (it shadows nothing in this scope; serialization uses `dumps`).
    if isinstance(what, (STRING_TYPES)):
        if what == "":
            # No selector: use the schema of the most recent instance.
            try:
                json = self._last_instance_or_error()["schema"]
            except IndexError:
                raise RuntimeError(no_instances_error)
        elif what.startswith(("request", "response", "$")):
            # Field path into the last instance (e.g. "response body", "$.email").
            self._last_instance_or_error()
            matches = self._find_by_field(what)
            if len(matches) > 1:
                json = [found["schema"] for found in matches]
            else:
                json = matches[0]["schema"]
        else:
            # Raw JSON text (or, failing that, a plain string) to derive
            # a fresh schema from.
            try:
                json = self._new_schema(self._input_json_as_string(what))
            except ValueError:
                json = self._new_schema(self._input_string(what))
    else:
        # Non-string value (list/dict/number/...): generate a schema for it.
        json = self._new_schema(self._input_json_from_non_string(what))
    sort_keys = self._input_boolean(sort_keys)
    if not file_path:
        self.log_json(json, sort_keys=sort_keys)
    else:
        content = dumps(
            json,
            ensure_ascii=False,
            indent=4,
            separators=(",", ": "),
            sort_keys=sort_keys,
        )
        write_mode = "a" if self._input_boolean(append) else "w"
        try:
            with open(
                path.join(getcwd(), file_path), write_mode, encoding="utf-8"
            ) as file:
                # Python 2 needs an explicit unicode object for the
                # utf-8 encoded file handle.
                if IS_PYTHON_2:
                    content = unicode(content)
                file.write(content)
        except IOError as e:
            raise RuntimeError(
                "Error outputting to file '%s':\n%s" % (file_path, e)
            )
    return json
[ "def", "output_schema", "(", "self", ",", "what", "=", "\"\"", ",", "file_path", "=", "None", ",", "append", "=", "False", ",", "sort_keys", "=", "False", ")", ":", "if", "isinstance", "(", "what", ",", "(", "STRING_TYPES", ")", ")", ":", "if", "what...
*Outputs JSON Schema to terminal or a file.* By default, the schema is output for the last request and response. The output can be limited further by: - The property of the last instance, e.g. ``request`` or ``response`` - Any nested property that exists, similarly as for assertion keywords Also variables and values that can be converted to JSON are accepted, in which case the schema is generated for those instead. *Options* ``file_path``: The JSON Schema is written to a file instead of terminal. The file is created if it does not exist. ``append``: If true, the JSON Schema is appended to the given file instead of truncating it first. ``sort_keys``: If true, the JSON Schema is sorted alphabetically by property names before it is output. *Examples* | `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` | | `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` | | `Output Schema` | $.email | # only the schema for one response body property | | `Output Schema` | $..geo | # only the schema for the nested response body property |
[ "*", "Outputs", "JSON", "Schema", "to", "terminal", "or", "a", "file", ".", "*" ]
python
train
40.230769
mfcloud/python-zvm-sdk
zvmsdk/vmops.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/vmops.py#L108-L112
def guest_start(self, userid):
    """Power on z/VM instance.

    :param userid: the userid (name) of the z/VM guest to power on.
    """
    # (Docstring previously started with a stray fourth quote: """"Power...)
    LOG.info("Begin to power on vm %s", userid)
    # The actual power-on is delegated to the SMT client.
    self._smtclient.guest_start(userid)
    LOG.info("Complete power on vm %s", userid)
[ "def", "guest_start", "(", "self", ",", "userid", ")", ":", "LOG", ".", "info", "(", "\"Begin to power on vm %s\"", ",", "userid", ")", "self", ".", "_smtclient", ".", "guest_start", "(", "userid", ")", "LOG", ".", "info", "(", "\"Complete power on vm %s\"", ...
Power on z/VM instance.
[ "Power", "on", "z", "/", "VM", "instance", "." ]
python
train
42.6
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L3276-L3290
def _is_indirect_jump(_, sim_successors):
    """
    Determine if this SimIRSB has an indirect jump as its exit
    """
    if sim_successors.artifacts['irsb_direct_next']:
        # The next address is statically known: a direct jump.
        return False

    jumpkind = sim_successors.artifacts['irsb_default_jumpkind']
    # Only calls and ordinary (or icache-invalidating) jumps count as
    # indirect jumps here; rets, syscalls, etc. are ignored.
    return jumpkind in ('Ijk_Call', 'Ijk_Boring', 'Ijk_InvalICache')
[ "def", "_is_indirect_jump", "(", "_", ",", "sim_successors", ")", ":", "if", "sim_successors", ".", "artifacts", "[", "'irsb_direct_next'", "]", ":", "# It's a direct jump", "return", "False", "default_jumpkind", "=", "sim_successors", ".", "artifacts", "[", "'irsb_...
Determine if this SimIRSB has an indirect jump as its exit
[ "Determine", "if", "this", "SimIRSB", "has", "an", "indirect", "jump", "as", "its", "exit" ]
python
train
35
widdowquinn/pyani
pyani/pyani_graphics.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_graphics.py#L312-L375
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
    """Returns matplotlib heatmap with cluster dendrograms.

    - dfr - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    - params - plotting parameters; accessed here as an object with
      .cmap/.vmin/.vmax/.classes attributes (the "[colormap, vmin, vmax]"
      list wording in the original doc does not match this usage —
      NOTE(review): confirm the intended params type)
    - labels - dictionary of alternative labels, keyed by default sequence labels
    - classes - dictionary of sequence classes, keyed by default sequence labels
    """
    # Layout figure grid and add title
    # Set figure size by the number of rows in the dataframe
    figsize = max(8, dfr.shape[0] * 0.175)
    fig = plt.figure(figsize=(figsize, figsize))
    # if title:
    #     fig.suptitle(title)
    # 2x2 grid: top-left empty corner, top = column dendrogram,
    # left = row dendrogram, bottom-right = the heatmap itself.
    heatmap_gs = gridspec.GridSpec(
        2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
    )
    # Add column and row dendrograms/axes to figure
    coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
    rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
    # Add heatmap axes to figure, with rows/columns as in the dendrograms
    heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
    # Reorder the matrix so rows/columns follow the dendrogram leaf order.
    ax_map = heatmap_axes.imshow(
        dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
        interpolation="nearest",
        cmap=params.cmap,
        origin="lower",
        vmin=params.vmin,
        vmax=params.vmax,
        aspect="auto",
    )
    # Are there class colourbars to add?
    if params.classes is not None:
        add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
        add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
    # Add heatmap labels (also in dendrogram leaf order)
    add_mpl_labels(
        heatmap_axes,
        dfr.index[rowdend["dendrogram"]["leaves"]],
        dfr.index[coldend["dendrogram"]["leaves"]],
        params,
    )
    # Add colour scale
    add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
    # Return figure output, and write, if required
    plt.subplots_adjust(top=0.85)  # Leave room for title
    # fig.set_tight_layout(True)
    # We know that there is a UserWarning here about tight_layout and
    # using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
    if outfilename:
        fig.savefig(outfilename)
    return fig
[ "def", "heatmap_mpl", "(", "dfr", ",", "outfilename", "=", "None", ",", "title", "=", "None", ",", "params", "=", "None", ")", ":", "# Layout figure grid and add title", "# Set figure size by the number of rows in the dataframe", "figsize", "=", "max", "(", "8", ","...
Returns matplotlib heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format) - params - a list of parameters for plotting: [colormap, vmin, vmax] - labels - dictionary of alternative labels, keyed by default sequence labels - classes - dictionary of sequence classes, keyed by default sequence labels
[ "Returns", "matplotlib", "heatmap", "with", "cluster", "dendrograms", "." ]
python
train
37.953125
saltstack/salt
salt/returners/influxdb_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/influxdb_return.py#L272-L291
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions
    '''
    serv = _get_serv(ret=None)
    # NOTE(review): `fun` is interpolated directly into the InfluxDB query
    # string; if callers can pass untrusted text this is injectable —
    # confirm how `fun` is obtained and consider validating/escaping it.
    sql = '''select first(id) as fid, first(full_ret) as fret
            from returns
            where fun = '{0}'
            group by fun, id
          '''.format(fun)

    data = serv.query(sql)
    ret = {}
    if data:
        # One series is expected; its 'points' rows follow the select
        # aliases: point[1] -> fid (minion id), point[2] -> fret
        # (JSON-encoded full return).
        points = data[0]['points']
        for point in points:
            ret[point[1]] = salt.utils.json.loads(point[2])

    return ret
[ "def", "get_fun", "(", "fun", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "sql", "=", "'''select first(id) as fid, first(full_ret) as fret\n from returns\n where fun = '{0}'\n group by fun, id\n '''", ".", "format", ...
Return a dict of the last function called for all minions
[ "Return", "a", "dict", "of", "the", "last", "function", "called", "for", "all", "minions" ]
python
train
23.7
PythonicNinja/pydrill
pydrill/client/__init__.py
https://github.com/PythonicNinja/pydrill/blob/0713e78c84d44cd438018e4ba1588a8e242f78c4/pydrill/client/__init__.py#L157-L172
def storage_detail(self, name, timeout=10):
    """
    Get the definition of the named storage plugin.

    :param name: The assigned name in the storage plugin definition.
    :param timeout: int
    :return: pydrill.client.Result
    """
    request = {
        'method': 'GET',
        'url': '/storage/{0}.json'.format(name),
        'params': {
            'request_timeout': timeout
        }
    }
    # perform_request returns the positional arguments for Result.
    return Result(*self.perform_request(**request))
[ "def", "storage_detail", "(", "self", ",", "name", ",", "timeout", "=", "10", ")", ":", "result", "=", "Result", "(", "*", "self", ".", "perform_request", "(", "*", "*", "{", "'method'", ":", "'GET'", ",", "'url'", ":", "'/storage/{0}.json'", ".", "for...
Get the definition of the named storage plugin. :param name: The assigned name in the storage plugin definition. :param timeout: int :return: pydrill.client.Result
[ "Get", "the", "definition", "of", "the", "named", "storage", "plugin", "." ]
python
train
31
rosshamish/hexgrid
hexgrid.py
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L386-L398
def nodes_touching_edge(edge_coord):
    """
    Returns the two node coordinates which are on the given edge coordinate.

    :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
    """
    a = hex_digit(edge_coord, 1)
    b = hex_digit(edge_coord, 2)
    # The pair of node offsets depends on the parity of the edge digits.
    if a % 2 == 0 and b % 2 == 0:
        offsets = ((0, 1), (1, 0))
    else:
        offsets = ((0, 0), (1, 1))
    return [coord_from_hex_digits(a + da, b + db) for da, db in offsets]
[ "def", "nodes_touching_edge", "(", "edge_coord", ")", ":", "a", ",", "b", "=", "hex_digit", "(", "edge_coord", ",", "1", ")", ",", "hex_digit", "(", "edge_coord", ",", "2", ")", "if", "a", "%", "2", "==", "0", "and", "b", "%", "2", "==", "0", ":"...
Returns the two node coordinates which are on the given edge coordinate. :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
[ "Returns", "the", "two", "node", "coordinates", "which", "are", "on", "the", "given", "edge", "coordinate", "." ]
python
train
39.230769
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_applicationmanagement.py
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_applicationmanagement.py#L39-L56
def open_application(self, remote_url, alias=None, **kwargs):
    """Opens a new application to given Appium server.
    Capabilities of appium server, Android and iOS,
    Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
    | *Option*            | *Man.* | *Description*     |
    | remote_url          | Yes    | Appium server url |
    | alias               | no     | alias             |

    Examples:
    | Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
    | Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
    """
    # All remaining keyword arguments become the desired capabilities.
    application = webdriver.Remote(str(remote_url), kwargs)
    self._debug('Opened application with session id %s' % application.session_id)
    # Register in the connection cache so keywords can switch between apps.
    return self._cache.register(application, alias)
[ "def", "open_application", "(", "self", ",", "remote_url", ",", "alias", "=", "None", ",", "*", "*", "kwargs", ")", ":", "desired_caps", "=", "kwargs", "application", "=", "webdriver", ".", "Remote", "(", "str", "(", "remote_url", ")", ",", "desired_caps",...
Opens a new application to given Appium server. Capabilities of appium server, Android and iOS, Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md | *Option* | *Man.* | *Description* | | remote_url | Yes | Appium server url | | alias | no | alias | Examples: | Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app | | Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
[ "Opens", "a", "new", "application", "to", "given", "Appium", "server", ".", "Capabilities", "of", "appium", "server", "Android", "and", "iOS", "Please", "check", "https", ":", "//", "github", ".", "com", "/", "appium", "/", "appium", "/", "blob", "/", "m...
python
train
67.5
quora/qcore
qcore/caching.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/caching.py#L383-L445
def memoize_with_ttl(ttl_secs=60 * 60 * 24):
    """Memoizes return values of the decorated function for a given time-to-live.

    Similar to l0cache, but the cache persists for the duration of the
    process, unless clear_cache() is called on the function or the
    time-to-live expires. By default, the time-to-live is set to 24 hours.

    Note: the cache is unbounded — expired entries are only replaced when
    the same key is queried again, never evicted proactively.
    """
    error_msg = (
        "Incorrect usage of qcore.caching.memoize_with_ttl: "
        "ttl_secs must be a positive integer."
    )
    assert_is_instance(ttl_secs, six.integer_types, error_msg)
    assert_gt(ttl_secs, 0, error_msg)

    def cache_fun(fun):
        # Normalize positional/keyword arguments so equivalent call
        # spellings share one cache key.
        argspec = inspect2.getfullargspec(fun)
        arg_names = argspec.args + argspec.kwonlyargs
        kwargs_defaults = get_kwargs_defaults(argspec)

        def cache_key(args, kwargs):
            # repr() of the normalized argument tuple serves as the key.
            return repr(get_args_tuple(args, kwargs, arg_names, kwargs_defaults))

        @functools.wraps(fun)
        def new_fun(*args, **kwargs):
            k = cache_key(args, kwargs)
            current_time = int(time.time())

            # k is not in the cache; perform the function and cache the result.
            if k not in new_fun.__cache or k not in new_fun.__cache_times:
                new_fun.__cache[k] = fun(*args, **kwargs)
                new_fun.__cache_times[k] = current_time
                return new_fun.__cache[k]

            # k is in the cache at this point. Check if the ttl has expired;
            # if so, recompute the value and cache it.
            cache_time = new_fun.__cache_times[k]
            if current_time - cache_time > ttl_secs:
                new_fun.__cache[k] = fun(*args, **kwargs)
                new_fun.__cache_times[k] = current_time

            # finally, return the cached result.
            return new_fun.__cache[k]

        def clear_cache():
            """Removes all cached values for this function."""
            new_fun.__cache.clear()
            new_fun.__cache_times.clear()

        def dirty(*args, **kwargs):
            """Dirties the function for a given set of arguments."""
            k = cache_key(args, kwargs)
            new_fun.__cache.pop(k, None)
            new_fun.__cache_times.pop(k, None)

        # These dunder-prefixed attributes are set inside a function, not a
        # class body, so no name mangling occurs here.
        new_fun.__cache = {}
        new_fun.__cache_times = {}
        new_fun.clear_cache = clear_cache
        new_fun.dirty = dirty
        return new_fun

    return cache_fun
[ "def", "memoize_with_ttl", "(", "ttl_secs", "=", "60", "*", "60", "*", "24", ")", ":", "error_msg", "=", "(", "\"Incorrect usage of qcore.caching.memoize_with_ttl: \"", "\"ttl_secs must be a positive integer.\"", ")", "assert_is_instance", "(", "ttl_secs", ",", "six", "...
Memoizes return values of the decorated function for a given time-to-live. Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache() is called on the function or the time-to-live expires. By default, the time-to-live is set to 24 hours.
[ "Memoizes", "return", "values", "of", "the", "decorated", "function", "for", "a", "given", "time", "-", "to", "-", "live", "." ]
python
train
36.68254
Parquery/icontract
icontract/_recompute.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_recompute.py#L396-L404
def visit_DictComp(self, node: ast.DictComp) -> Any:
    """Compile the dictionary comprehension as a function and call it."""
    outcome = self._execute_comprehension(node=node)

    # Visit the generator iterables so their values are recomputed too.
    for comprehension in node.generators:
        self.visit(comprehension.iter)

    self.recomputed_values[node] = outcome
    return outcome
[ "def", "visit_DictComp", "(", "self", ",", "node", ":", "ast", ".", "DictComp", ")", "->", "Any", ":", "result", "=", "self", ".", "_execute_comprehension", "(", "node", "=", "node", ")", "for", "generator", "in", "node", ".", "generators", ":", "self", ...
Compile the dictionary comprehension as a function and call it.
[ "Compile", "the", "dictionary", "comprehension", "as", "a", "function", "and", "call", "it", "." ]
python
train
36.555556
noahbenson/neuropythy
neuropythy/io/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/io/core.py#L110-L123
def forget_importer(name):
    '''
    forget_importer(name) yields True if an importer of type name was
    successfully forgotten from the neuropythy importers list and false
    otherwise. This function must be called before an importer can be
    replaced.
    '''
    global importers
    # Importer names are registered lower-case.
    name = name.lower()
    if name in importers:
        # NOTE(review): this rebinding assumes `importers` is a persistent
        # collection whose discard() returns the updated collection (e.g.
        # pyrsistent); a builtin set's discard() returns None — confirm the
        # actual type of the module-level `importers`.
        importers = importers.discard(name)
        # Importers are also exposed as attributes of `load`; remove that too.
        delattr(load, name)
        return True
    else:
        return False
[ "def", "forget_importer", "(", "name", ")", ":", "global", "importers", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "in", "importers", ":", "importers", "=", "importers", ".", "discard", "(", "name", ")", "delattr", "(", "load", ",", "na...
forget_importer(name) yields True if an importer of type name was successfully forgotten from the neuropythy importers list and false otherwise. This function must be called before an importer can be replaced.
[ "forget_importer", "(", "name", ")", "yields", "True", "if", "an", "importer", "of", "type", "name", "was", "successfully", "forgotten", "from", "the", "neuropythy", "importers", "list", "and", "false", "otherwise", ".", "This", "function", "must", "be", "call...
python
train
32.071429
jaraco/keyring
keyring/util/platform_.py
https://github.com/jaraco/keyring/blob/71c798378e365286b7cc03c06e4d7d24c7de8fc4/keyring/util/platform_.py#L51-L60
def _config_root_Linux():
    """
    Use freedesktop.org Base Dir Specification to determine config location.
    """
    # Warn/migrate if a legacy config location is in use.
    _check_old_config_root()
    # NOTE(review): the XDG spec's fallback for XDG_CONFIG_HOME is
    # ~/.config; ~/.local/share is the XDG_DATA_HOME fallback. This may be
    # deliberate for backward compatibility — confirm before changing.
    fallback = os.path.expanduser('~/.local/share')
    key = 'XDG_CONFIG_HOME'
    root = os.environ.get(key, None) or fallback
    return os.path.join(root, 'python_keyring')
[ "def", "_config_root_Linux", "(", ")", ":", "_check_old_config_root", "(", ")", "fallback", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.local/share'", ")", "key", "=", "'XDG_CONFIG_HOME'", "root", "=", "os", ".", "environ", ".", "get", "(", "key", ...
Use freedesktop.org Base Dir Specfication to determine config location.
[ "Use", "freedesktop", ".", "org", "Base", "Dir", "Specfication", "to", "determine", "config", "location", "." ]
python
valid
31.8
tensorflow/tensor2tensor
tensor2tensor/insights/server.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/insights/server.py#L79-L84
def load_config(self):
    """Apply the stored options to the gunicorn settings.

    Only options that name a known setting and carry a non-None value
    are applied; setting names are lower-cased on the way in.
    """
    for key, value in iteritems(self.options):
        if key in self.cfg.settings and value is not None:
            self.cfg.set(key.lower(), value)
[ "def", "load_config", "(", "self", ")", ":", "config", "=", "dict", "(", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "iteritems", "(", "self", ".", "options", ")", "if", "key", "in", "self", ".", "cfg", ".", "settings", ...
Loads the configuration.
[ "Loads", "the", "configuration", "." ]
python
train
46.166667
bblfsh/client-python
bblfsh/compat.py
https://github.com/bblfsh/client-python/blob/815835d191d5e385973f3c685849cc3b46aa20a5/bblfsh/compat.py#L230-L237
def properties(self) -> dict:
    """
    Returns the properties of the current node in the iteration.
    """
    node = self._last_node
    # NOTE(review): for dict nodes this returns a keys view, not a dict,
    # despite the annotation — confirm whether callers rely on that.
    return node.keys() if isinstance(node, dict) else {}
[ "def", "properties", "(", "self", ")", "->", "dict", ":", "if", "isinstance", "(", "self", ".", "_last_node", ",", "dict", ")", ":", "return", "self", ".", "_last_node", ".", "keys", "(", ")", "else", ":", "return", "{", "}" ]
Returns the properties of the current node in the iteration.
[ "Returns", "the", "properties", "of", "the", "current", "node", "in", "the", "iteration", "." ]
python
train
29.875
IBM-Cloud/gp-python-client
gpclient/gpclient.py
https://github.com/IBM-Cloud/gp-python-client/blob/082c6cdc250fb61bea99cba8ac3ee855ee77a410/gpclient/gpclient.py#L335-L348
def __get_bundle_data(self, bundleId):
    """``GET /{serviceInstanceId}/v2/bundles/{bundleId}``

    Retrieve the named bundle's information, or None when the REST call
    yields no response.
    """
    bundleUrl = self.__get_base_bundle_url() + '/' + bundleId
    response = self.__perform_rest_call(requestURL=bundleUrl)
    if not response:
        return None
    # The payload of interest sits under the bundle key of the response.
    return response.get(self.__RESPONSE_BUNDLE_KEY)
[ "def", "__get_bundle_data", "(", "self", ",", "bundleId", ")", ":", "url", "=", "self", ".", "__get_base_bundle_url", "(", ")", "+", "'/'", "+", "bundleId", "response", "=", "self", ".", "__perform_rest_call", "(", "requestURL", "=", "url", ")", "if", "not...
``GET /{serviceInstanceId}/v2/bundles/{bundleId}`` Gets the bundle's information.
[ "GET", "/", "{", "serviceInstanceId", "}", "/", "v2", "/", "bundles", "/", "{", "bundleId", "}", "Gets", "the", "bundle", "s", "information", "." ]
python
train
29.428571
edx/edx-enterprise
enterprise/api/v1/serializers.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/serializers.py#L694-L707
def validate(self, data):  # pylint: disable=arguments-differ
    """
    Validate that at least one of the user identifier fields has been passed in.
    """
    identifiers = (data.get('lms_user_id'),
                   data.get('tpa_user_id'),
                   data.get('user_email'))
    if not any(identifiers):
        raise serializers.ValidationError(
            'At least one of the following fields must be specified and map to an EnterpriseCustomerUser: '
            'lms_user_id, tpa_user_id, user_email'
        )
    return data
[ "def", "validate", "(", "self", ",", "data", ")", ":", "# pylint: disable=arguments-differ", "lms_user_id", "=", "data", ".", "get", "(", "'lms_user_id'", ")", "tpa_user_id", "=", "data", ".", "get", "(", "'tpa_user_id'", ")", "user_email", "=", "data", ".", ...
Validate that at least one of the user identifier fields has been passed in.
[ "Validate", "that", "at", "least", "one", "of", "the", "user", "identifier", "fields", "has", "been", "passed", "in", "." ]
python
valid
43.5
aeroxis/sultan
src/sultan/result.py
https://github.com/aeroxis/sultan/blob/65b4271a161d6c19a9eb0170b5a95832a139ab7f/src/sultan/result.py#L245-L253
def print_traceback(self, always_print=False): """ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) """ if self._exception or always_print: self.__echo.critical("--{ TRACEBACK }" + "-" * 100) self.__format_lines_error(self.traceback) self.__echo.critical("---------------" + "-" * 100)
[ "def", "print_traceback", "(", "self", ",", "always_print", "=", "False", ")", ":", "if", "self", ".", "_exception", "or", "always_print", ":", "self", ".", "__echo", ".", "critical", "(", "\"--{ TRACEBACK }\"", "+", "\"-\"", "*", "100", ")", "self", ".", ...
Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false)
[ "Prints", "the", "traceback", "to", "console", "-", "if", "there", "is", "any", "traceback", "otherwise", "does", "nothing", ".", ":", "param", "always_print", ":", "print", "the", "traceback", "even", "if", "there", "is", "nothing", "in", "the", "buffer", ...
python
valid
54.333333
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L4919-L4930
def Zoom(self, zoomLevel: float, waitTime: float = OPERATION_WAIT_TIME) -> bool: """ Call IUIAutomationTransformPattern2::Zoom. Zoom the viewport of the control. zoomLevel: float for int. waitTime: float. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom """ ret = self.pattern.Zoom(zoomLevel) == S_OK time.sleep(waitTime) return ret
[ "def", "Zoom", "(", "self", ",", "zoomLevel", ":", "float", ",", "waitTime", ":", "float", "=", "OPERATION_WAIT_TIME", ")", "->", "bool", ":", "ret", "=", "self", ".", "pattern", ".", "Zoom", "(", "zoomLevel", ")", "==", "S_OK", "time", ".", "sleep", ...
Call IUIAutomationTransformPattern2::Zoom. Zoom the viewport of the control. zoomLevel: float for int. waitTime: float. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom
[ "Call", "IUIAutomationTransformPattern2", "::", "Zoom", ".", "Zoom", "the", "viewport", "of", "the", "control", ".", "zoomLevel", ":", "float", "for", "int", ".", "waitTime", ":", "float", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", ...
python
valid
45.166667
numenta/nupic
src/nupic/swarming/hypersearch_v2.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_v2.py#L1700-L2016
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None): """Find or create a candidate particle to produce a new model. At any one time, there is an active set of swarms in the current sprint, where each swarm in the sprint represents a particular combination of fields. Ideally, we should try to balance the number of models we have evaluated for each swarm at any time. This method will see how many models have been evaluated for each active swarm in the current active sprint(s) and then try and choose a particle from the least represented swarm in the first possible active sprint, with the following constraints/rules: for each active sprint: for each active swarm (preference to those with least# of models so far): 1.) The particle will be created from new (generation #0) if there are not already self._minParticlesPerSwarm particles in the swarm. 2.) Find the first gen that has a completed particle and evolve that particle to the next generation. 3.) If we got to here, we know that we have satisfied the min# of particles for the swarm, and they are all currently running (probably at various generation indexes). Go onto the next swarm If we couldn't find a swarm to allocate a particle in, go onto the next sprint and start allocating particles there.... Parameters: ---------------------------------------------------------------- exhaustedSwarmId: If not None, force a change to the current set of active swarms by marking this swarm as either 'completing' or 'completed'. If there are still models being evaluaed in it, mark it as 'completing', else 'completed. This is used in situations where we can't find any new unique models to create in this swarm. In these situations, we force an update to the hypersearch state so no other worker wastes time try to use this swarm. 
retval: (exit, particle, swarm) exit: If true, this worker is ready to exit (particle and swarm will be None) particle: Which particle to run swarm: which swarm the particle is in NOTE: When particle and swarm are None and exit is False, it means that we need to wait for one or more other worker(s) to finish their respective models before we can pick a particle to run. This will generally only happen when speculativeParticles is set to False. """ # Cancel search? jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0] if jobCancel: self._jobCancelled = True # Did a worker cancel the job because of an error? (workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID, ['workerCompletionReason', 'workerCompletionMsg']) if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS: self.logger.info("Exiting due to job being cancelled") self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg="Job was cancelled"), useConnectionID=False, ignoreUnchanged=True) else: self.logger.error("Exiting because some worker set the " "workerCompletionReason to %s. WorkerCompletionMsg: %s" % (workerCmpReason, workerCmpMsg)) return (True, None, None) # Perform periodic updates on the Hypersearch state. 
if self._hsState is not None: priorActiveSwarms = self._hsState.getActiveSwarms() else: priorActiveSwarms = None # Update the HypersearchState, checking for matured swarms, and marking # the passed in swarm as exhausted, if any self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId) # The above call may have modified self._hsState['activeSwarmIds'] # Log the current set of active swarms activeSwarms = self._hsState.getActiveSwarms() if activeSwarms != priorActiveSwarms: self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms, priorActiveSwarms)) self.logger.debug("Active swarms: %s" % (activeSwarms)) # If too many model errors were detected, exit totalCmpModels = self._resultsDB.getNumCompletedModels() if totalCmpModels > 5: numErrs = self._resultsDB.getNumErrModels() if (float(numErrs) / totalCmpModels) > self._maxPctErrModels: # Get one of the errors errModelIds = self._resultsDB.getErrModelIds() resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0] modelErrMsg = resInfo.completionMsg cmpMsg = "%s: Exiting due to receiving too many models failing" \ " from exceptions (%d out of %d). \nModel Exception: %s" % \ (ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels, modelErrMsg) self.logger.error(cmpMsg) # Cancel the entire job now, if it has not already been cancelled workerCmpReason = self._cjDAO.jobGetFields(self._jobID, ['workerCompletionReason'])[0] if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS: self._cjDAO.jobSetFields( self._jobID, fields=dict( cancel=True, workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR, workerCompletionMsg = cmpMsg), useConnectionID=False, ignoreUnchanged=True) return (True, None, None) # If HsState thinks the search is over, exit. It is seeing if the results # on the sprint we just completed are worse than a prior sprint. if self._hsState.isSearchOver(): cmpMsg = "Exiting because results did not improve in most recently" \ " completed sprint." 
self.logger.info(cmpMsg) self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg=cmpMsg), useConnectionID=False, ignoreUnchanged=True) return (True, None, None) # Search successive active sprints, until we can find a candidate particle # to work with sprintIdx = -1 while True: # Is this sprint active? sprintIdx += 1 (active, eos) = self._hsState.isSprintActive(sprintIdx) # If no more sprints to explore: if eos: # If any prior ones are still being explored, finish up exploring them if self._hsState.anyGoodSprintsActive(): self.logger.info("No more sprints to explore, waiting for prior" " sprints to complete") return (False, None, None) # Else, we're done else: cmpMsg = "Exiting because we've evaluated all possible field " \ "combinations" self._cjDAO.jobSetFields(self._jobID, dict(workerCompletionMsg=cmpMsg), useConnectionID=False, ignoreUnchanged=True) self.logger.info(cmpMsg) return (True, None, None) if not active: if not self._speculativeParticles: if not self._hsState.isSprintCompleted(sprintIdx): self.logger.info("Waiting for all particles in sprint %d to complete" "before evolving any more particles" % (sprintIdx)) return (False, None, None) continue # ==================================================================== # Look for swarms that have particle "holes" in their generations. That is, # an earlier generation with less than minParticlesPerSwarm. This can # happen if a model that was started eariler got orphaned. If we detect # this, start a new particle in that generation. 
swarmIds = self._hsState.getActiveSwarms(sprintIdx) for swarmId in swarmIds: firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration( swarmId=swarmId, minNumParticles=self._minParticlesPerSwarm) if firstNonFullGenIdx is None: continue if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId): self.logger.info("Cloning an earlier model in generation %d of swarm " "%s (sprintIdx=%s) to replace an orphaned model" % ( firstNonFullGenIdx, swarmId, sprintIdx)) # Clone a random orphaned particle from the incomplete generation (allParticles, allModelIds, errScores, completed, matured) = \ self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx) if len(allModelIds) > 0: # We have seen instances where we get stuck in a loop incessantly # trying to clone earlier models (NUP-1511). My best guess is that # we've already successfully cloned each of the orphaned models at # least once, but still need at least one more. If we don't create # a new particleID, we will never be able to instantiate another # model (since particleID hash is a unique key in the models table). # So, on 1/8/2013 this logic was changed to create a new particleID # whenever we clone an orphan. newParticleId = True self.logger.info("Cloning an orphaned model") # If there is no orphan, clone one of the other particles. We can # have no orphan if this was a speculative generation that only # continued particles completed in the prior generation. 
else: newParticleId = True self.logger.info("No orphans found, so cloning a non-orphan") (allParticles, allModelIds, errScores, completed, matured) = \ self._resultsDB.getParticleInfos(swarmId=swarmId, genIdx=firstNonFullGenIdx) # Clone that model modelId = random.choice(allModelIds) self.logger.info("Cloning model %r" % (modelId)) (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId) particle = Particle(hsObj = self, resultsDB = self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, newFromClone=particleState, newParticleId=newParticleId) return (False, particle, swarmId) # ==================================================================== # Sort the swarms in priority order, trying the ones with the least # number of models first swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds]) swarmSizeAndIdList = zip(swarmSizes, swarmIds) swarmSizeAndIdList.sort() for (_, swarmId) in swarmSizeAndIdList: # ------------------------------------------------------------------- # 1.) The particle will be created from new (at generation #0) if there # are not already self._minParticlesPerSwarm particles in the swarm. 
(allParticles, allModelIds, errScores, completed, matured) = ( self._resultsDB.getParticleInfos(swarmId)) if len(allParticles) < self._minParticlesPerSwarm: particle = Particle(hsObj=self, resultsDB=self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, swarmId=swarmId, newFarFrom=allParticles) # Jam in the best encoder state found from the first sprint bestPriorModel = None if sprintIdx >= 1: (bestPriorModel, errScore) = self._hsState.bestModelInSprint(0) if bestPriorModel is not None: self.logger.info("Best model and errScore from previous sprint(%d):" " %s, %g" % (0, str(bestPriorModel), errScore)) (baseState, modelId, errScore, completed, matured) \ = self._resultsDB.getParticleInfo(bestPriorModel) particle.copyEncoderStatesFrom(baseState) # Copy the best inference type from the earlier sprint particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType']) # It's best to jiggle the best settings from the prior sprint, so # compute a new position starting from that previous best # Only jiggle the vars we copied from the prior model whichVars = [] for varName in baseState['varStates']: if ':' in varName: whichVars.append(varName) particle.newPosition(whichVars) self.logger.debug("Particle after incorporating encoder vars from best " "model in previous sprint: \n%s" % (str(particle))) return (False, particle, swarmId) # ------------------------------------------------------------------- # 2.) Look for a completed particle to evolve # Note that we use lastDescendent. We only want to evolve particles that # are at their most recent generation index. (readyParticles, readyModelIds, readyErrScores, _, _) = ( self._resultsDB.getParticleInfos(swarmId, genIdx=None, matured=True, lastDescendent=True)) # If we have at least 1 ready particle to evolve... 
if len(readyParticles) > 0: readyGenIdxs = [x['genIdx'] for x in readyParticles] sortedGenIdxs = sorted(set(readyGenIdxs)) genIdx = sortedGenIdxs[0] # Now, genIdx has the generation of the particle we want to run, # Get a particle from that generation and evolve it. useParticle = None for particle in readyParticles: if particle['genIdx'] == genIdx: useParticle = particle break # If speculativeParticles is off, we don't want to evolve a particle # into the next generation until all particles in the current # generation have completed. if not self._speculativeParticles: (particles, _, _, _, _) = self._resultsDB.getParticleInfos( swarmId, genIdx=genIdx, matured=False) if len(particles) > 0: continue particle = Particle(hsObj=self, resultsDB=self._resultsDB, flattenedPermuteVars=self._flattenedPermutations, evolveFromState=useParticle) return (False, particle, swarmId) # END: for (swarmSize, swarmId) in swarmSizeAndIdList: # No success in this swarm, onto next swarm # ==================================================================== # We couldn't find a particle in this sprint ready to evolve. If # speculative particles is OFF, we have to wait for one or more other # workers to finish up their particles before we can do anything. if not self._speculativeParticles: self.logger.info("Waiting for one or more of the %s swarms " "to complete a generation before evolving any more particles" \ % (str(swarmIds))) return (False, None, None)
[ "def", "_getCandidateParticleAndSwarm", "(", "self", ",", "exhaustedSwarmId", "=", "None", ")", ":", "# Cancel search?", "jobCancel", "=", "self", ".", "_cjDAO", ".", "jobGetFields", "(", "self", ".", "_jobID", ",", "[", "'cancel'", "]", ")", "[", "0", "]", ...
Find or create a candidate particle to produce a new model. At any one time, there is an active set of swarms in the current sprint, where each swarm in the sprint represents a particular combination of fields. Ideally, we should try to balance the number of models we have evaluated for each swarm at any time. This method will see how many models have been evaluated for each active swarm in the current active sprint(s) and then try and choose a particle from the least represented swarm in the first possible active sprint, with the following constraints/rules: for each active sprint: for each active swarm (preference to those with least# of models so far): 1.) The particle will be created from new (generation #0) if there are not already self._minParticlesPerSwarm particles in the swarm. 2.) Find the first gen that has a completed particle and evolve that particle to the next generation. 3.) If we got to here, we know that we have satisfied the min# of particles for the swarm, and they are all currently running (probably at various generation indexes). Go onto the next swarm If we couldn't find a swarm to allocate a particle in, go onto the next sprint and start allocating particles there.... Parameters: ---------------------------------------------------------------- exhaustedSwarmId: If not None, force a change to the current set of active swarms by marking this swarm as either 'completing' or 'completed'. If there are still models being evaluaed in it, mark it as 'completing', else 'completed. This is used in situations where we can't find any new unique models to create in this swarm. In these situations, we force an update to the hypersearch state so no other worker wastes time try to use this swarm. 
retval: (exit, particle, swarm) exit: If true, this worker is ready to exit (particle and swarm will be None) particle: Which particle to run swarm: which swarm the particle is in NOTE: When particle and swarm are None and exit is False, it means that we need to wait for one or more other worker(s) to finish their respective models before we can pick a particle to run. This will generally only happen when speculativeParticles is set to False.
[ "Find", "or", "create", "a", "candidate", "particle", "to", "produce", "a", "new", "model", "." ]
python
valid
48.091483
zhanglab/psamm
psamm/metabolicmodel.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/metabolicmodel.py#L352-L356
def lower(self): """Lower bound""" if self._reaction in self._view._flipped: return -super(FlipableFluxBounds, self).upper return super(FlipableFluxBounds, self).lower
[ "def", "lower", "(", "self", ")", ":", "if", "self", ".", "_reaction", "in", "self", ".", "_view", ".", "_flipped", ":", "return", "-", "super", "(", "FlipableFluxBounds", ",", "self", ")", ".", "upper", "return", "super", "(", "FlipableFluxBounds", ",",...
Lower bound
[ "Lower", "bound" ]
python
train
39.8
ArduPilot/MAVProxy
MAVProxy/modules/lib/mp_image.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/mp_image.py#L348-L390
def on_redraw_timer(self, event): '''the redraw timer ensures we show new map tiles as they are downloaded''' state = self.state while state.in_queue.qsize(): try: obj = state.in_queue.get() except Exception: time.sleep(0.05) return if isinstance(obj, MPImageData): with warnings.catch_warnings(): warnings.simplefilter('ignore') img = wx.EmptyImage(obj.width, obj.height) img.SetData(obj.data) self.img = img self.need_redraw = True if state.auto_size: client_area = state.frame.GetClientSize() total_area = state.frame.GetSize() bx = max(total_area.x - client_area.x,0) by = max(total_area.y - client_area.y,0) state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by)) if isinstance(obj, MPImageTitle): state.frame.SetTitle(obj.title) if isinstance(obj, MPImageRecenter): self.on_recenter(obj.location) if isinstance(obj, MPImageMenu): self.set_menu(obj.menu) if isinstance(obj, MPImagePopupMenu): self.set_popup_menu(obj.menu) if isinstance(obj, MPImageBrightness): state.brightness = obj.brightness self.need_redraw = True if isinstance(obj, MPImageFullSize): self.full_size() if isinstance(obj, MPImageFitToWindow): self.fit_to_window() if isinstance(obj, win_layout.WinLayout): win_layout.set_wx_window_layout(state.frame, obj) if self.need_redraw: self.redraw()
[ "def", "on_redraw_timer", "(", "self", ",", "event", ")", ":", "state", "=", "self", ".", "state", "while", "state", ".", "in_queue", ".", "qsize", "(", ")", ":", "try", ":", "obj", "=", "state", ".", "in_queue", ".", "get", "(", ")", "except", "Ex...
the redraw timer ensures we show new map tiles as they are downloaded
[ "the", "redraw", "timer", "ensures", "we", "show", "new", "map", "tiles", "as", "they", "are", "downloaded" ]
python
train
42.767442
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L183-L194
def variance_larger_than_standard_deviation(x): """ Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x being larger than 1 :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: bool """ y = np.var(x) return y > np.sqrt(y)
[ "def", "variance_larger_than_standard_deviation", "(", "x", ")", ":", "y", "=", "np", ".", "var", "(", "x", ")", "return", "y", ">", "np", ".", "sqrt", "(", "y", ")" ]
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x being larger than 1 :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: bool
[ "Boolean", "variable", "denoting", "if", "the", "variance", "of", "x", "is", "greater", "than", "its", "standard", "deviation", ".", "Is", "equal", "to", "variance", "of", "x", "being", "larger", "than", "1" ]
python
train
32.083333
apache/incubator-heron
heron/common/src/python/pex_loader.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/common/src/python/pex_loader.py#L60-L96
def resolve_heron_suffix_issue(abs_pex_path, class_path): """Resolves duplicate package suffix problems When dynamically loading a pex file and a corresponding python class (bolt/spout/topology), if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts with this Heron Instance pex package (heron.instance.src.python...), making the Python interpreter unable to find the target class in a given pex file. This function resolves this issue by individually loading packages with suffix `heron` to avoid this issue. However, if a dependent module/class that is not directly specified under ``class_path`` and has conflicts with other native heron packages, there is a possibility that such a class/module might not be imported correctly. For example, if a given ``class_path`` was ``heron.common.src.module.Class``, but it has a dependent module (such as by import statement), ``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that ``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError. The best way to avoid this issue is NOT to dynamically load a pex file whose top level package name is ``heron``. Note that this method is included because some of the example topologies and tests have to have a pex with its top level package name of ``heron``. 
""" # import top-level package named `heron` of a given pex file importer = zipimport.zipimporter(abs_pex_path) importer.load_module("heron") # remove 'heron' and the classname to_load_lst = class_path.split('.')[1:-1] loaded = ['heron'] loaded_mod = None for to_load in to_load_lst: sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded))) loaded_mod = sub_importer.load_module(to_load) loaded.append(to_load) return loaded_mod
[ "def", "resolve_heron_suffix_issue", "(", "abs_pex_path", ",", "class_path", ")", ":", "# import top-level package named `heron` of a given pex file", "importer", "=", "zipimport", ".", "zipimporter", "(", "abs_pex_path", ")", "importer", ".", "load_module", "(", "\"heron\"...
Resolves duplicate package suffix problems When dynamically loading a pex file and a corresponding python class (bolt/spout/topology), if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts with this Heron Instance pex package (heron.instance.src.python...), making the Python interpreter unable to find the target class in a given pex file. This function resolves this issue by individually loading packages with suffix `heron` to avoid this issue. However, if a dependent module/class that is not directly specified under ``class_path`` and has conflicts with other native heron packages, there is a possibility that such a class/module might not be imported correctly. For example, if a given ``class_path`` was ``heron.common.src.module.Class``, but it has a dependent module (such as by import statement), ``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that ``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError. The best way to avoid this issue is NOT to dynamically load a pex file whose top level package name is ``heron``. Note that this method is included because some of the example topologies and tests have to have a pex with its top level package name of ``heron``.
[ "Resolves", "duplicate", "package", "suffix", "problems" ]
python
valid
55.594595
closeio/quotequail
quotequail/_html.py
https://github.com/closeio/quotequail/blob/8a3960c033d595b25a8bbc2c340be898e3065b5f/quotequail/_html.py#L374-L387
def indented_tree_line_generator(el, max_lines=None): """ Like tree_line_generator, but yields tuples (start_ref, end_ref, line), where the line already takes the indentation into account by having "> " prepended. If a line already starts with ">", it is escaped ("\\>"). This makes it possible to reliably use methods that analyze plain text to detect quoting. """ gen = tree_line_generator(el, max_lines) for start_ref, end_ref, indentation_level, line in gen: # Escape line if line.startswith('>'): line = '\\' + line yield start_ref, end_ref, '> '*indentation_level + line
[ "def", "indented_tree_line_generator", "(", "el", ",", "max_lines", "=", "None", ")", ":", "gen", "=", "tree_line_generator", "(", "el", ",", "max_lines", ")", "for", "start_ref", ",", "end_ref", ",", "indentation_level", ",", "line", "in", "gen", ":", "# Es...
Like tree_line_generator, but yields tuples (start_ref, end_ref, line), where the line already takes the indentation into account by having "> " prepended. If a line already starts with ">", it is escaped ("\\>"). This makes it possible to reliably use methods that analyze plain text to detect quoting.
[ "Like", "tree_line_generator", "but", "yields", "tuples", "(", "start_ref", "end_ref", "line", ")", "where", "the", "line", "already", "takes", "the", "indentation", "into", "account", "by", "having", ">", "prepended", ".", "If", "a", "line", "already", "start...
python
train
45.357143
resync/resync
resync/mapper.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/mapper.py#L100-L109
def dst_to_src(self, dst_file): """Map destination path to source URI.""" for map in self.mappings: src_uri = map.dst_to_src(dst_file) if (src_uri is not None): return(src_uri) # Must have failed if loop exited raise MapperError( "Unable to translate destination path (%s) " "into a source URI." % (dst_file))
[ "def", "dst_to_src", "(", "self", ",", "dst_file", ")", ":", "for", "map", "in", "self", ".", "mappings", ":", "src_uri", "=", "map", ".", "dst_to_src", "(", "dst_file", ")", "if", "(", "src_uri", "is", "not", "None", ")", ":", "return", "(", "src_ur...
Map destination path to source URI.
[ "Map", "destination", "path", "to", "source", "URI", "." ]
python
train
39.6
senaite/senaite.core
bika/lims/upgrade/v01_01_006.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_01_006.py#L462-L483
def migrateFileFields(portal): """ This function walks over all attachment types and migrates their FileField fields. """ portal_types = [ "Attachment", "ARImport", "Instrument", "InstrumentCertification", "Method", "Multifile", "Report", "ARReport", "SamplePoint"] for portal_type in portal_types: # Do the migration migrate_to_blob( portal, portal_type=portal_type, remove_old_value=True)
[ "def", "migrateFileFields", "(", "portal", ")", ":", "portal_types", "=", "[", "\"Attachment\"", ",", "\"ARImport\"", ",", "\"Instrument\"", ",", "\"InstrumentCertification\"", ",", "\"Method\"", ",", "\"Multifile\"", ",", "\"Report\"", ",", "\"ARReport\"", ",", "\"...
This function walks over all attachment types and migrates their FileField fields.
[ "This", "function", "walks", "over", "all", "attachment", "types", "and", "migrates", "their", "FileField", "fields", "." ]
python
train
23.545455
OLC-Bioinformatics/sipprverse
pointsippr/pointsippr.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointsippr/pointsippr.py#L345-L403
def write_table_report(summary_dict, seqid, genus): """ Parse the PointFinder table output, and write a summary report :param summary_dict: nested dictionary containing data such as header strings, and paths to reports :param seqid: name of the strain, :param genus: MASH-calculated genus of current isolate """ # Set the header string if the summary report doesn't already exist if not os.path.isfile(summary_dict[genus]['table']['summary']): header_string = summary_dict[genus]['table']['header'] else: header_string = str() summary_string = '{seq},'.format(seq=seqid) try: # Read in the predictions with open(summary_dict[genus]['table']['output'], 'r') as outputs: for header_value in summary_dict[genus]['table']['header'].split(',')[:-1]: for line in outputs: if line.startswith('{hv}\n'.format(hv=header_value)): # Iterate through the lines following the match for subline in outputs: if subline != '\n': if subline.startswith('Mutation'): for detailline in outputs: if detailline != '\n': summary_string += '{},'.format(detailline.split('\t')[0]) else: break else: summary_string += '{},'.format( subline.replace(',', ';').replace('\t', ',').rstrip()) break else: break break # Reset the file iterator to the first line in preparation for the next header outputs.seek(0) # Ensure that there were results to report if summary_string: if not summary_string.endswith('\n'): summary_string += '\n' # Write the summaries to the summary file with open(summary_dict[genus]['table']['summary'], 'a+') as summary: # Write the header if necessary if header_string: summary.write(header_string) summary.write(summary_string) except FileNotFoundError: # Write the summaries to the summary file with open(summary_dict[genus]['table']['summary'], 'a+') as summary: # Extract the length of the header from the dictionary. 
Subtract two (don't need the strain, or the # empty column created by a trailing comma header_len = len(summary_dict[genus]['table']['header'].split(',')) - 2 # Populate the summary strain with the appropriate number of comma-separated 'Gene not found' entries summary_string += '{empty}\n'.format(empty='Gene not found,' * header_len) # Write the header if necessary if header_string: summary.write(header_string) summary.write(summary_string)
[ "def", "write_table_report", "(", "summary_dict", ",", "seqid", ",", "genus", ")", ":", "# Set the header string if the summary report doesn't already exist", "if", "not", "os", ".", "path", ".", "isfile", "(", "summary_dict", "[", "genus", "]", "[", "'table'", "]",...
Parse the PointFinder table output, and write a summary report :param summary_dict: nested dictionary containing data such as header strings, and paths to reports :param seqid: name of the strain, :param genus: MASH-calculated genus of current isolate
[ "Parse", "the", "PointFinder", "table", "output", "and", "write", "a", "summary", "report", ":", "param", "summary_dict", ":", "nested", "dictionary", "containing", "data", "such", "as", "header", "strings", "and", "paths", "to", "reports", ":", "param", "seqi...
python
train
57.644068
saltstack/salt
salt/states/ipset.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ipset.py#L239-L309
def absent(name, entry=None, entries=None, family='ipv4', **kwargs): ''' .. versionadded:: 2014.7.0 Remove a entry or entries from a chain name A user-defined name to call this entry by in another part of a state or formula. This should not be an actual entry. family Network family, ipv4 or ipv6. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if not entry: ret['result'] = False ret['comment'] = ('ipset entry must be specified') return ret entries = [] if isinstance(entry, list): entries = entry else: entries.append(entry) for entry in entries: entry_opts = '' if ' ' in entry: entry, entry_opts = entry.split(' ', 1) if 'timeout' in kwargs and 'timeout' not in entry_opts: entry_opts = 'timeout {0} {1}'.format(kwargs['timeout'], entry_opts) if 'comment' in kwargs and 'comment' not in entry_opts: entry_opts = '{0} comment "{1}"'.format(entry_opts, kwargs['comment']) _entry = ' '.join([entry, entry_opts]).strip() log.debug('_entry %s', _entry) if not __salt__['ipset.check'](kwargs['set_name'], _entry, family) is True: ret['result'] = True ret['comment'] += 'ipset entry for {0} not present in set {1} for {2}\n'.format( _entry, kwargs['set_name'], family) else: if __opts__['test']: ret['result'] = None ret['comment'] += 'ipset entry {0} would be removed from set {1} for {2}\n'.format( entry, kwargs['set_name'], family) else: command = __salt__['ipset.delete'](kwargs['set_name'], entry, family, **kwargs) if 'Error' not in command: ret['changes'] = {'locale': name} ret['result'] = True ret['comment'] += 'ipset entry {1} removed from set {0} for {2}\n'.format( kwargs['set_name'], _entry, family) else: ret['result'] = False ret['comment'] = 'Failed to delete ipset entry from set {0} for {2}. ' \ 'Attempted entry was {1}.\n' \ '{3}\n'.format(kwargs['set_name'], _entry, family, command) return ret
[ "def", "absent", "(", "name", ",", "entry", "=", "None", ",", "entries", "=", "None", ",", "family", "=", "'ipv4'", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", ...
.. versionadded:: 2014.7.0 Remove a entry or entries from a chain name A user-defined name to call this entry by in another part of a state or formula. This should not be an actual entry. family Network family, ipv4 or ipv6.
[ "..", "versionadded", "::", "2014", ".", "7", ".", "0" ]
python
train
36.394366
dw/mitogen
mitogen/core.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L2627-L2697
def add_handler(self, fn, handle=None, persist=True, policy=None, respondent=None, overwrite=False): """ Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to `handle` from this context. Unregister after one invocation if `persist` is :data:`False`. If `handle` is :data:`None`, a new handle is allocated and returned. :param int handle: If not :data:`None`, an explicit handle to register, usually one of the ``mitogen.core.*`` constants. If unspecified, a new unused handle will be allocated. :param bool persist: If :data:`False`, the handler will be unregistered after a single message has been received. :param Context respondent: Context that messages to this handle are expected to be sent from. If specified, arranges for a dead message to be delivered to `fn` when disconnection of the context is detected. In future `respondent` will likely also be used to prevent other contexts from sending messages to the handle. :param function policy: Function invoked as `policy(msg, stream)` where `msg` is a :class:`mitogen.core.Message` about to be delivered, and `stream` is the :class:`mitogen.core.Stream` on which it was received. The function must return :data:`True`, otherwise an error is logged and delivery is refused. Two built-in policy functions exist: * :func:`has_parent_authority`: requires the message arrived from a parent context, or a context acting with a parent context's authority (``auth_id``). * :func:`mitogen.parent.is_immediate_child`: requires the message arrived from an immediately connected child, for use in messaging patterns where either something becomes buggy or insecure by permitting indirect upstream communication. In case of refusal, and the message's ``reply_to`` field is nonzero, a :class:`mitogen.core.CallError` is delivered to the sender indicating refusal occurred. :param bool overwrite: If :data:`True`, allow existing handles to be silently overwritten. 
:return: `handle`, or if `handle` was :data:`None`, the newly allocated handle. :raises Error: Attemp to register handle that was already registered. """ handle = handle or next(self._last_handle) _vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist) if handle in self._handle_map and not overwrite: raise Error(self.duplicate_handle_msg) self._handle_map[handle] = persist, fn, policy, respondent if respondent: if respondent not in self._handles_by_respondent: self._handles_by_respondent[respondent] = set() listen(respondent, 'disconnect', lambda: self._on_respondent_disconnect(respondent)) self._handles_by_respondent[respondent].add(handle) return handle
[ "def", "add_handler", "(", "self", ",", "fn", ",", "handle", "=", "None", ",", "persist", "=", "True", ",", "policy", "=", "None", ",", "respondent", "=", "None", ",", "overwrite", "=", "False", ")", ":", "handle", "=", "handle", "or", "next", "(", ...
Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to `handle` from this context. Unregister after one invocation if `persist` is :data:`False`. If `handle` is :data:`None`, a new handle is allocated and returned. :param int handle: If not :data:`None`, an explicit handle to register, usually one of the ``mitogen.core.*`` constants. If unspecified, a new unused handle will be allocated. :param bool persist: If :data:`False`, the handler will be unregistered after a single message has been received. :param Context respondent: Context that messages to this handle are expected to be sent from. If specified, arranges for a dead message to be delivered to `fn` when disconnection of the context is detected. In future `respondent` will likely also be used to prevent other contexts from sending messages to the handle. :param function policy: Function invoked as `policy(msg, stream)` where `msg` is a :class:`mitogen.core.Message` about to be delivered, and `stream` is the :class:`mitogen.core.Stream` on which it was received. The function must return :data:`True`, otherwise an error is logged and delivery is refused. Two built-in policy functions exist: * :func:`has_parent_authority`: requires the message arrived from a parent context, or a context acting with a parent context's authority (``auth_id``). * :func:`mitogen.parent.is_immediate_child`: requires the message arrived from an immediately connected child, for use in messaging patterns where either something becomes buggy or insecure by permitting indirect upstream communication. In case of refusal, and the message's ``reply_to`` field is nonzero, a :class:`mitogen.core.CallError` is delivered to the sender indicating refusal occurred. :param bool overwrite: If :data:`True`, allow existing handles to be silently overwritten. :return: `handle`, or if `handle` was :data:`None`, the newly allocated handle. :raises Error: Attemp to register handle that was already registered.
[ "Invoke", "fn", "(", "msg", ")", "on", "the", ":", "class", ":", "Broker", "thread", "for", "each", "Message", "sent", "to", "handle", "from", "this", "context", ".", "Unregister", "after", "one", "invocation", "if", "persist", "is", ":", "data", ":", ...
python
train
45.15493
ungarj/mapchete
mapchete/commons/contours.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/commons/contours.py#L6-L55
def extract_contours(array, tile, interval=100, field='elev', base=0): """ Extract contour lines from an array. Parameters ---------- array : array input elevation data tile : Tile tile covering the array interval : integer elevation value interval when drawing contour lines field : string output field name containing elevation value base : integer elevation base value the intervals are computed from Returns ------- contours : iterable contours as GeoJSON-like pairs of properties and geometry """ import matplotlib.pyplot as plt levels = _get_contour_values( array.min(), array.max(), interval=interval, base=base) if not levels: return [] contours = plt.contour(array, levels) index = 0 out_contours = [] for level in range(len(contours.collections)): elevation = levels[index] index += 1 paths = contours.collections[level].get_paths() for path in paths: out_coords = [ ( tile.left + (y * tile.pixel_x_size), tile.top - (x * tile.pixel_y_size), ) for x, y in zip(path.vertices[:, 1], path.vertices[:, 0]) ] if len(out_coords) >= 2: out_contours.append( dict( properties={field: elevation}, geometry=mapping(LineString(out_coords)) ) ) return out_contours
[ "def", "extract_contours", "(", "array", ",", "tile", ",", "interval", "=", "100", ",", "field", "=", "'elev'", ",", "base", "=", "0", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "levels", "=", "_get_contour_values", "(", "array", ".", ...
Extract contour lines from an array. Parameters ---------- array : array input elevation data tile : Tile tile covering the array interval : integer elevation value interval when drawing contour lines field : string output field name containing elevation value base : integer elevation base value the intervals are computed from Returns ------- contours : iterable contours as GeoJSON-like pairs of properties and geometry
[ "Extract", "contour", "lines", "from", "an", "array", "." ]
python
valid
30.76
MIT-LCP/wfdb-python
wfdb/processing/evaluate.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/evaluate.py#L118-L197
def compare(self): """ Main comparison function """ """ Note: Make sure to be able to handle these ref/test scenarios: A: o----o---o---o x-------x----x B: o----o-----o---o x--------x--x--x C: o------o-----o---o x-x--------x--x--x D: o------o-----o---o x-x--------x-----x """ test_samp_num = 0 ref_samp_num = 0 # Iterate through the reference sample numbers while ref_samp_num < self.n_ref and test_samp_num < self.n_test: # Get the closest testing sample number for this reference sample closest_samp_num, smallest_samp_diff = ( self._get_closest_samp_num(ref_samp_num, test_samp_num)) # Get the closest testing sample number for the next reference # sample. This doesn't need to be called for the last index. if ref_samp_num < self.n_ref - 1: closest_samp_num_next, smallest_samp_diff_next = ( self._get_closest_samp_num(ref_samp_num + 1, test_samp_num)) else: # Set non-matching value if there is no next reference sample # to compete for the test sample closest_samp_num_next = -1 # Found a contested test sample number. Decide which # reference sample it belongs to. If the sample is closer to # the next reference sample, leave it to the next reference # sample and label this reference sample as unmatched. if (closest_samp_num == closest_samp_num_next and smallest_samp_diff_next < smallest_samp_diff): # Get the next closest sample for this reference sample, # if not already assigned to a previous sample. # It will be the previous testing sample number in any # possible case (scenario D below), or nothing. 
if closest_samp_num and (not ref_samp_num or closest_samp_num - 1 != self.matching_sample_nums[ref_samp_num - 1]): # The previous test annotation is inspected closest_samp_num = closest_samp_num - 1 smallest_samp_diff = abs(self.ref_sample[ref_samp_num] - self.test_sample[closest_samp_num]) # Assign the reference-test pair if close enough if smallest_samp_diff < self.window_width: self.matching_sample_nums[ref_samp_num] = closest_samp_num # Set the starting test sample number to inspect # for the next reference sample. test_samp_num = closest_samp_num + 1 # Otherwise there is no matching test annotation # If there is no clash, or the contested test sample is # closer to the current reference, keep the test sample # for this reference sample. else: # Assign the reference-test pair if close enough if smallest_samp_diff < self.window_width: self.matching_sample_nums[ref_samp_num] = closest_samp_num # Increment the starting test sample number to inspect # for the next reference sample. test_samp_num = closest_samp_num + 1 ref_samp_num += 1 self._calc_stats()
[ "def", "compare", "(", "self", ")", ":", "\"\"\"\n Note: Make sure to be able to handle these ref/test scenarios:\n\n A:\n o----o---o---o\n x-------x----x\n\n B:\n o----o-----o---o\n x--------x--x--x\n\n C:\n o------o-----o---o\n x-x-...
Main comparison function
[ "Main", "comparison", "function" ]
python
train
42.75
Azure/azure-sdk-for-python
azure-common/azure/common/credentials.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-common/azure/common/credentials.py#L31-L52
def get_azure_cli_credentials(resource=None, with_tenant=False): """Return Credentials and default SubscriptionID of current loaded profile of the CLI. Credentials will be the "az login" command: https://docs.microsoft.com/cli/azure/authenticate-azure-cli Default subscription ID is either the only one you have, or you can define it: https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli .. versionadded:: 1.1.6 :param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.) :param bool with_tenant: If True, return a three-tuple with last as tenant ID :return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant) :rtype: tuple """ profile = get_cli_profile() cred, subscription_id, tenant_id = profile.get_login_credentials(resource=resource) if with_tenant: return cred, subscription_id, tenant_id else: return cred, subscription_id
[ "def", "get_azure_cli_credentials", "(", "resource", "=", "None", ",", "with_tenant", "=", "False", ")", ":", "profile", "=", "get_cli_profile", "(", ")", "cred", ",", "subscription_id", ",", "tenant_id", "=", "profile", ".", "get_login_credentials", "(", "resou...
Return Credentials and default SubscriptionID of current loaded profile of the CLI. Credentials will be the "az login" command: https://docs.microsoft.com/cli/azure/authenticate-azure-cli Default subscription ID is either the only one you have, or you can define it: https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli .. versionadded:: 1.1.6 :param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.) :param bool with_tenant: If True, return a three-tuple with last as tenant ID :return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant) :rtype: tuple
[ "Return", "Credentials", "and", "default", "SubscriptionID", "of", "current", "loaded", "profile", "of", "the", "CLI", "." ]
python
test
43.681818
jasonrbriggs/stomp.py
stomp/transport.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L149-L158
def set_listener(self, name, listener): """ Set a named listener to use with this connection. See :py:class:`stomp.listener.ConnectionListener` :param str name: the name of the listener :param ConnectionListener listener: the listener object """ with self.__listeners_change_condition: self.listeners[name] = listener
[ "def", "set_listener", "(", "self", ",", "name", ",", "listener", ")", ":", "with", "self", ".", "__listeners_change_condition", ":", "self", ".", "listeners", "[", "name", "]", "=", "listener" ]
Set a named listener to use with this connection. See :py:class:`stomp.listener.ConnectionListener` :param str name: the name of the listener :param ConnectionListener listener: the listener object
[ "Set", "a", "named", "listener", "to", "use", "with", "this", "connection", ".", "See", ":", "py", ":", "class", ":", "stomp", ".", "listener", ".", "ConnectionListener" ]
python
train
37.7
pgmpy/pgmpy
pgmpy/factors/discrete/DiscreteFactor.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/DiscreteFactor.py#L333-L380
def normalize(self, inplace=True): """ Normalizes the values of factor so that they sum to 1. Parameters ---------- inplace: boolean If inplace=True it will modify the factor itself, else would return a new factor Returns ------- DiscreteFactor or None: if inplace=True (default) returns None if inplace=False returns a new `DiscreteFactor` instance. Examples -------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi.values array([[[ 0, 1], [ 2, 3], [ 4, 5]], [[ 6, 7], [ 8, 9], [10, 11]]]) >>> phi.normalize() >>> phi.variables ['x1', 'x2', 'x3'] >>> phi.cardinality array([2, 3, 2]) >>> phi.values array([[[ 0. , 0.01515152], [ 0.03030303, 0.04545455], [ 0.06060606, 0.07575758]], [[ 0.09090909, 0.10606061], [ 0.12121212, 0.13636364], [ 0.15151515, 0.16666667]]]) """ phi = self if inplace else self.copy() phi.values = phi.values / phi.values.sum() if not inplace: return phi
[ "def", "normalize", "(", "self", ",", "inplace", "=", "True", ")", ":", "phi", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "phi", ".", "values", "=", "phi", ".", "values", "/", "phi", ".", "values", ".", "sum", "(", ")", ...
Normalizes the values of factor so that they sum to 1. Parameters ---------- inplace: boolean If inplace=True it will modify the factor itself, else would return a new factor Returns ------- DiscreteFactor or None: if inplace=True (default) returns None if inplace=False returns a new `DiscreteFactor` instance. Examples -------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi.values array([[[ 0, 1], [ 2, 3], [ 4, 5]], [[ 6, 7], [ 8, 9], [10, 11]]]) >>> phi.normalize() >>> phi.variables ['x1', 'x2', 'x3'] >>> phi.cardinality array([2, 3, 2]) >>> phi.values array([[[ 0. , 0.01515152], [ 0.03030303, 0.04545455], [ 0.06060606, 0.07575758]], [[ 0.09090909, 0.10606061], [ 0.12121212, 0.13636364], [ 0.15151515, 0.16666667]]])
[ "Normalizes", "the", "values", "of", "factor", "so", "that", "they", "sum", "to", "1", "." ]
python
train
28.270833
AndrewIngram/django-extra-views
extra_views/dates.py
https://github.com/AndrewIngram/django-extra-views/blob/188e1bf1f15a44d9a599028d020083af9fb43ea7/extra_views/dates.py#L80-L90
def get_first_of_week(self): """ Returns an integer representing the first day of the week. 0 represents Monday, 6 represents Sunday. """ if self.first_of_week is None: raise ImproperlyConfigured("%s.first_of_week is required." % self.__class__.__name__) if self.first_of_week not in range(7): raise ImproperlyConfigured("%s.first_of_week must be an integer between 0 and 6." % self.__class__.__name__) return self.first_of_week
[ "def", "get_first_of_week", "(", "self", ")", ":", "if", "self", ".", "first_of_week", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "\"%s.first_of_week is required.\"", "%", "self", ".", "__class__", ".", "__name__", ")", "if", "self", ".", "first_of...
Returns an integer representing the first day of the week. 0 represents Monday, 6 represents Sunday.
[ "Returns", "an", "integer", "representing", "the", "first", "day", "of", "the", "week", "." ]
python
valid
45.363636
Toilal/rebulk
rebulk/match.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/match.py#L639-L645
def children(self): """ Children matches. """ if self._children is None: self._children = Matches(None, self.input_string) return self._children
[ "def", "children", "(", "self", ")", ":", "if", "self", ".", "_children", "is", "None", ":", "self", ".", "_children", "=", "Matches", "(", "None", ",", "self", ".", "input_string", ")", "return", "self", ".", "_children" ]
Children matches.
[ "Children", "matches", "." ]
python
train
27.142857
tanghaibao/goatools
goatools/grouper/aart_geneproducts_one.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/aart_geneproducts_one.py#L68-L72
def str_summaryline(self): """Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase.""" return "{N} GOs, {M} genes described by {X} of {Y} sections {NM}".format( N=len(self.go2nt), M=len(self.gene2gos), X=len(self.sec2chr), Y=len(self.datobj.sec2chr), NM=self.name)
[ "def", "str_summaryline", "(", "self", ")", ":", "return", "\"{N} GOs, {M} genes described by {X} of {Y} sections {NM}\"", ".", "format", "(", "N", "=", "len", "(", "self", ".", "go2nt", ")", ",", "M", "=", "len", "(", "self", ".", "gene2gos", ")", ",", "X",...
Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase.
[ "Print", ":", "47", "GOs", "262", "genes", "described", "by", "10", "of", "19", "sections", "consistent_increase", "." ]
python
train
64.6
MostAwesomeDude/gentleman
gentleman/base.py
https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/base.py#L113-L131
def AddClusterTags(r, tags, dry_run=False): """ Adds tags to the cluster. @type tags: list of str @param tags: tags to add to the cluster @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """ query = { "dry-run": dry_run, "tag": tags, } return r.request("put", "/2/tags", query=query)
[ "def", "AddClusterTags", "(", "r", ",", "tags", ",", "dry_run", "=", "False", ")", ":", "query", "=", "{", "\"dry-run\"", ":", "dry_run", ",", "\"tag\"", ":", "tags", ",", "}", "return", "r", ".", "request", "(", "\"put\"", ",", "\"/2/tags\"", ",", "...
Adds tags to the cluster. @type tags: list of str @param tags: tags to add to the cluster @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id
[ "Adds", "tags", "to", "the", "cluster", "." ]
python
train
19.842105
CitrineInformatics/python-citrination-client
citrination_client/views/client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/client.py#L86-L96
def get(self, data_view_id): """ Gets basic information about a view :param data_view_id: Identifier of the data view :return: Metadata about the view as JSON """ failure_message = "Dataview get failed" return self._get_success_json(self._get( 'v1/data_views/' + data_view_id, None, failure_message=failure_message))['data']['data_view']
[ "def", "get", "(", "self", ",", "data_view_id", ")", ":", "failure_message", "=", "\"Dataview get failed\"", "return", "self", ".", "_get_success_json", "(", "self", ".", "_get", "(", "'v1/data_views/'", "+", "data_view_id", ",", "None", ",", "failure_message", ...
Gets basic information about a view :param data_view_id: Identifier of the data view :return: Metadata about the view as JSON
[ "Gets", "basic", "information", "about", "a", "view" ]
python
valid
36.090909
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L696-L703
def _detect_encoding(readline): """Return file encoding.""" try: from lib2to3.pgen2 import tokenize as lib2to3_tokenize encoding = lib2to3_tokenize.detect_encoding(readline)[0] return encoding except (LookupError, SyntaxError, UnicodeDecodeError): return 'latin-1'
[ "def", "_detect_encoding", "(", "readline", ")", ":", "try", ":", "from", "lib2to3", ".", "pgen2", "import", "tokenize", "as", "lib2to3_tokenize", "encoding", "=", "lib2to3_tokenize", ".", "detect_encoding", "(", "readline", ")", "[", "0", "]", "return", "enco...
Return file encoding.
[ "Return", "file", "encoding", "." ]
python
test
37.625
aiven/pghoard
pghoard/restore.py
https://github.com/aiven/pghoard/blob/2994165d4ef3ff7a5669a2527346bcbfb5b3bd8a/pghoard/restore.py#L194-L199
def list_basebackups(self, arg): """List basebackups from an object store""" self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False) site = config.get_site_from_config(self.config, arg.site) self.storage = self._get_object_storage(site, pgdata=None) self.storage.show_basebackup_list(verbose=arg.verbose)
[ "def", "list_basebackups", "(", "self", ",", "arg", ")", ":", "self", ".", "config", "=", "config", ".", "read_json_config_file", "(", "arg", ".", "config", ",", "check_commands", "=", "False", ",", "check_pgdata", "=", "False", ")", "site", "=", "config",...
List basebackups from an object store
[ "List", "basebackups", "from", "an", "object", "store" ]
python
train
63.333333
blackecho/Deep-Learning-TensorFlow
yadlt/core/layers.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/core/layers.py#L80-L106
def accuracy(mod_y, ref_y, summary=True, name="accuracy"): """Accuracy computation op. Parameters ---------- mod_y : tf.Tensor Model output tensor. ref_y : tf.Tensor Reference input tensor. summary : bool, optional (default = True) Whether to save tf summary for the op. Returns ------- tf.Tensor : accuracy op. tensor """ with tf.name_scope(name): mod_pred = tf.argmax(mod_y, 1) correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) if summary: tf.summary.scalar('accuracy', accuracy) return accuracy
[ "def", "accuracy", "(", "mod_y", ",", "ref_y", ",", "summary", "=", "True", ",", "name", "=", "\"accuracy\"", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "mod_pred", "=", "tf", ".", "argmax", "(", "mod_y", ",", "1", ")", "corr...
Accuracy computation op. Parameters ---------- mod_y : tf.Tensor Model output tensor. ref_y : tf.Tensor Reference input tensor. summary : bool, optional (default = True) Whether to save tf summary for the op. Returns ------- tf.Tensor : accuracy op. tensor
[ "Accuracy", "computation", "op", "." ]
python
train
27.518519
jtpaasch/simplygithub
simplygithub/files.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/files.py#L115-L144
def add_file_to_tree(tree, file_path, file_contents, is_executable=False): """Add a file to a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of the new file in the tree. file_contents The (UTF-8 encoded) contents of the new file. is_executable If ``True``, the new file will get executable permissions (0755). Otherwise, it will get 0644 permissions. Returns: The provided tree, but with the new file added. """ record = { "path": file_path, "mode": "100755" if is_executable else "100644", "type": "blob", "content": file_contents, } tree.append(record) return tree
[ "def", "add_file_to_tree", "(", "tree", ",", "file_path", ",", "file_contents", ",", "is_executable", "=", "False", ")", ":", "record", "=", "{", "\"path\"", ":", "file_path", ",", "\"mode\"", ":", "\"100755\"", "if", "is_executable", "else", "\"100644\"", ","...
Add a file to a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of the new file in the tree. file_contents The (UTF-8 encoded) contents of the new file. is_executable If ``True``, the new file will get executable permissions (0755). Otherwise, it will get 0644 permissions. Returns: The provided tree, but with the new file added.
[ "Add", "a", "file", "to", "a", "tree", "." ]
python
train
25.333333
LonamiWebs/Telethon
telethon/tl/custom/inlinebuilder.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/inlinebuilder.py#L59-L108
async def article( self, title, description=None, *, url=None, thumb=None, content=None, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): """ Creates new inline result of article type. Args: title (`str`): The title to be shown for this result. description (`str`, optional): Further explanation of what this result means. url (`str`, optional): The URL to be shown for this result. thumb (:tl:`InputWebDocument`, optional): The thumbnail to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present. content (:tl:`InputWebDocument`, optional): The content to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present. """ # TODO Does 'article' work always? # article, photo, gif, mpeg4_gif, video, audio, # voice, document, location, venue, contact, game result = types.InputBotInlineResult( id=id or '', type='article', send_message=await self._message( text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ), title=title, description=description, url=url, thumb=thumb, content=content ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result
[ "async", "def", "article", "(", "self", ",", "title", ",", "description", "=", "None", ",", "*", ",", "url", "=", "None", ",", "thumb", "=", "None", ",", "content", "=", "None", ",", "id", "=", "None", ",", "text", "=", "None", ",", "parse_mode", ...
Creates new inline result of article type. Args: title (`str`): The title to be shown for this result. description (`str`, optional): Further explanation of what this result means. url (`str`, optional): The URL to be shown for this result. thumb (:tl:`InputWebDocument`, optional): The thumbnail to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present. content (:tl:`InputWebDocument`, optional): The content to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present.
[ "Creates", "new", "inline", "result", "of", "article", "type", "." ]
python
train
34.7
ofw/curlify
curlify.py
https://github.com/ofw/curlify/blob/5a464218431f979ac78d089682d36860b57420ce/curlify.py#L4-L42
def to_curl(request, compressed=False, verify=True): """ Returns string with curl command by provided request object Parameters ---------- compressed : bool If `True` then `--compressed` argument will be added to result """ parts = [ ('curl', None), ('-X', request.method), ] for k, v in sorted(request.headers.items()): parts += [('-H', '{0}: {1}'.format(k, v))] if request.body: body = request.body if isinstance(body, bytes): body = body.decode('utf-8') parts += [('-d', body)] if compressed: parts += [('--compressed', None)] if not verify: parts += [('--insecure', None)] parts += [(None, request.url)] flat_parts = [] for k, v in parts: if k: flat_parts.append(k) if v: flat_parts.append("'{0}'".format(v)) return ' '.join(flat_parts)
[ "def", "to_curl", "(", "request", ",", "compressed", "=", "False", ",", "verify", "=", "True", ")", ":", "parts", "=", "[", "(", "'curl'", ",", "None", ")", ",", "(", "'-X'", ",", "request", ".", "method", ")", ",", "]", "for", "k", ",", "v", "...
Returns string with curl command by provided request object Parameters ---------- compressed : bool If `True` then `--compressed` argument will be added to result
[ "Returns", "string", "with", "curl", "command", "by", "provided", "request", "object" ]
python
train
23.025641
gebn/wood
wood/integrations/s3.py
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/integrations/s3.py#L16-L77
def objects_to_root(objects: List) -> Root: """ Convert a list of s3 ObjectSummaries into a directory tree. :param objects: The list of objects, e.g. the result of calling `.objects.all()` on a bucket. :return: The tree structure, contained within a root node. """ def _to_tree(objs: Iterable) -> Dict: """ Build a tree structure from a flat list of objects. :param objs: The raw iterable of S3 `ObjectSummary`s, as returned by a bucket listing. :return: The listing as a nested dictionary where keys are directory and file names. The values of directories will in turn be a dict. The values of keys representing files will be the `ObjectSummary` instance. """ path_tree = {} for obj in objs: is_dir = obj.key.endswith('/') chunks = [chunk for chunk in obj.key.split('/') if chunk] chunk_count = len(chunks) tmp = path_tree for i, chunk in enumerate(chunks): is_last_chunk = i == chunk_count - 1 if is_last_chunk and not is_dir: tmp[chunk] = obj else: # must be a directory if chunk not in tmp: # it doesn't exist - create it tmp[chunk] = {} tmp = tmp[chunk] return path_tree def _to_entity(key: str, value: Union[Dict, Any]) -> Entity: """ Turn a nested dictionary representing an S3 bucket into the correct `Entity` object. :param key: The name of the entity. :param value: If the entity is a directory, the nested dict representing its contents. Otherwise, the `ObjectSummary` instance representing the file. :return: The entity representing the entity name and value pair. """ if isinstance(value, dict): return Directory( key, {key_: _to_entity(key_, value_) for key_, value_ in value.items()}) return File(pathlib.PurePath(value.key).name, value.size, value.e_tag.strip('"')) tree = _to_tree(objects) return Root({pathlib.PurePath(key).name: _to_entity(key, value) for key, value in tree.items()})
[ "def", "objects_to_root", "(", "objects", ":", "List", ")", "->", "Root", ":", "def", "_to_tree", "(", "objs", ":", "Iterable", ")", "->", "Dict", ":", "\"\"\"\n Build a tree structure from a flat list of objects.\n\n :param objs: The raw iterable of S3 `ObjectS...
Convert a list of s3 ObjectSummaries into a directory tree. :param objects: The list of objects, e.g. the result of calling `.objects.all()` on a bucket. :return: The tree structure, contained within a root node.
[ "Convert", "a", "list", "of", "s3", "ObjectSummaries", "into", "a", "directory", "tree", "." ]
python
train
38.580645
galactics/beyond
beyond/frames/iau2010.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/frames/iau2010.py#L54-L64
def _earth_orientation(date): """Earth orientation parameters in degrees """ ttt = date.change_scale('TT').julian_century # a_a = 0.12 # a_c = 0.26 # s_prime = -0.0015 * (a_c ** 2 / 1.2 + a_a ** 2) * ttt s_prime = - 0.000047 * ttt return date.eop.x / 3600., date.eop.y / 3600., s_prime / 3600
[ "def", "_earth_orientation", "(", "date", ")", ":", "ttt", "=", "date", ".", "change_scale", "(", "'TT'", ")", ".", "julian_century", "# a_a = 0.12", "# a_c = 0.26", "# s_prime = -0.0015 * (a_c ** 2 / 1.2 + a_a ** 2) * ttt", "s_prime", "=", "-", "0.000047", "*", "ttt"...
Earth orientation parameters in degrees
[ "Earth", "orientation", "parameters", "in", "degrees" ]
python
train
28.727273
marcomusy/vtkplotter
vtkplotter/addons.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/addons.py#L645-L1012
def addAxes(axtype=None, c=None): """Draw axes on scene. Available axes types: :param int axtype: - 0, no axes, - 1, draw three gray grid walls - 2, show cartesian axes from (0,0,0) - 3, show positive range of cartesian axes from (0,0,0) - 4, show a triad at bottom left - 5, show a cube at bottom left - 6, mark the corners of the bounding box - 7, draw a simple ruler at the bottom of the window - 8, show the ``vtkCubeAxesActor`` object - 9, show the bounding box outLine - 10, show three circles representing the maximum bounding box """ vp = settings.plotter_instance if axtype is not None: vp.axes = axtype # overrride r = vp.renderers.index(vp.renderer) if not vp.axes: return if c is None: # automatic black or white c = (0.9, 0.9, 0.9) if numpy.sum(vp.renderer.GetBackground()) > 1.5: c = (0.1, 0.1, 0.1) if not vp.renderer: return if vp.axes_exist[r]: return # calculate max actors bounds bns = [] for a in vp.actors: if a and a.GetPickable(): b = a.GetBounds() if b: bns.append(b) if len(bns): max_bns = numpy.max(bns, axis=0) min_bns = numpy.min(bns, axis=0) vbb = (min_bns[0], max_bns[1], min_bns[2], max_bns[3], min_bns[4], max_bns[5]) else: vbb = vp.renderer.ComputeVisiblePropBounds() max_bns = vbb min_bns = vbb sizes = (max_bns[1] - min_bns[0], max_bns[3] - min_bns[2], max_bns[5] - min_bns[4]) ############################################################ if vp.axes == 1 or vp.axes == True: # gray grid walls nd = 4 # number of divisions in the smallest axis off = -0.04 # label offset step = numpy.min(sizes) / nd if not step: # bad proportions, use vtkCubeAxesActor vp.addAxes(axtype=8, c=c) vp.axes = 1 return rx, ry, rz = numpy.rint(sizes / step).astype(int) if max([rx / ry, ry / rx, rx / rz, rz / rx, ry / rz, rz / ry]) > 15: # bad proportions, use vtkCubeAxesActor vp.addAxes(axtype=8, c=c) vp.axes = 1 return gxy = shapes.Grid(pos=(0.5, 0.5, 0), normal=[0, 0, 1], bc=None, resx=rx, resy=ry) gxz = shapes.Grid(pos=(0.5, 0, 0.5), normal=[0, 1, 0], bc=None, resx=rz, resy=rx) 
gyz = shapes.Grid(pos=(0, 0.5, 0.5), normal=[1, 0, 0], bc=None, resx=rz, resy=ry) gxy.alpha(0.06).wire(False).color(c).lineWidth(1) gxz.alpha(0.04).wire(False).color(c).lineWidth(1) gyz.alpha(0.04).wire(False).color(c).lineWidth(1) xa = shapes.Line([0, 0, 0], [1, 0, 0], c=c, lw=1) ya = shapes.Line([0, 0, 0], [0, 1, 0], c=c, lw=1) za = shapes.Line([0, 0, 0], [0, 0, 1], c=c, lw=1) xt, yt, zt, ox, oy, oz = [None] * 6 if vp.xtitle: xtitle = vp.xtitle if min_bns[0] <= 0 and max_bns[1] > 0: # mark x origin ox = shapes.Cube([-min_bns[0] / sizes[0], 0, 0], side=0.008, c=c) if len(vp.xtitle) == 1: # add axis length info xtitle = vp.xtitle + " /" + utils.precision(sizes[0], 4) wpos = [1 - (len(vp.xtitle) + 1) / 40, off, 0] xt = shapes.Text(xtitle, pos=wpos, normal=(0, 0, 1), s=0.025, c=c, justify="bottom-right") if vp.ytitle: if min_bns[2] <= 0 and max_bns[3] > 0: # mark y origin oy = shapes.Cube([0, -min_bns[2] / sizes[1], 0], side=0.008, c=c) yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1), s=0.025, c=c, justify="bottom-right") if len(vp.ytitle) == 1: wpos = [off, 1 - (len(vp.ytitle) + 1) / 40, 0] yt.pos(wpos) else: wpos = [off * 0.7, 1 - (len(vp.ytitle) + 1) / 40, 0] yt.rotateZ(90).pos(wpos) if vp.ztitle: if min_bns[4] <= 0 and max_bns[5] > 0: # mark z origin oz = shapes.Cube([0, 0, -min_bns[4] / sizes[2]], side=0.008, c=c) zt = shapes.Text(vp.ztitle, pos=(0, 0, 0), normal=(1, -1, 0), s=0.025, c=c, justify="bottom-right") if len(vp.ztitle) == 1: wpos = [off * 0.6, off * 0.6, 1 - (len(vp.ztitle) + 1) / 40] zt.rotate(90, (1, -1, 0)).pos(wpos) else: wpos = [off * 0.3, off * 0.3, 1 - (len(vp.ztitle) + 1) / 40] zt.rotate(180, (1, -1, 0)).pos(wpos) acts = [gxy, gxz, gyz, xa, ya, za, xt, yt, zt, ox, oy, oz] for a in acts: if a: a.PickableOff() aa = Assembly(acts) aa.pos(min_bns[0], min_bns[2], min_bns[4]) aa.SetScale(sizes) aa.PickableOff() vp.renderer.AddActor(aa) vp.axes_exist[r] = aa elif vp.axes == 2 or vp.axes == 3: vbb = vp.renderer.ComputeVisiblePropBounds() # 
to be double checked xcol, ycol, zcol = "db", "dg", "dr" s = 1 alpha = 1 centered = False x0, x1, y0, y1, z0, z1 = vbb dx, dy, dz = x1 - x0, y1 - y0, z1 - z0 aves = numpy.sqrt(dx * dx + dy * dy + dz * dz) / 2 x0, x1 = min(x0, 0), max(x1, 0) y0, y1 = min(y0, 0), max(y1, 0) z0, z1 = min(z0, 0), max(z1, 0) if vp.axes == 3: if x1 > 0: x0 = 0 if y1 > 0: y0 = 0 if z1 > 0: z0 = 0 dx, dy, dz = x1 - x0, y1 - y0, z1 - z0 acts = [] if x0 * x1 <= 0 or y0 * z1 <= 0 or z0 * z1 <= 0: # some ranges contain origin zero = shapes.Sphere(r=aves / 120 * s, c="k", alpha=alpha, res=10) acts += [zero] if len(vp.xtitle) and dx > aves/100: xl = shapes.Cylinder([[x0, 0, 0], [x1, 0, 0]], r=aves/250*s, c=xcol, alpha=alpha) xc = shapes.Cone(pos=[x1, 0, 0], c=xcol, alpha=alpha, r=aves/100*s, height=aves/25*s, axis=[1, 0, 0], res=10) wpos = [x1-(len(vp.xtitle)+1)*aves/40*s, -aves/25*s, 0] # aligned to arrow tip if centered: wpos = [(x0 + x1) / 2 - len(vp.xtitle) / 2 * aves / 40 * s, -aves / 25 * s, 0] xt = shapes.Text(vp.xtitle, pos=wpos, normal=(0, 0, 1), s=aves / 40 * s, c=xcol) acts += [xl, xc, xt] if len(vp.ytitle) and dy > aves/100: yl = shapes.Cylinder([[0, y0, 0], [0, y1, 0]], r=aves/250*s, c=ycol, alpha=alpha) yc = shapes.Cone(pos=[0, y1, 0], c=ycol, alpha=alpha, r=aves/100*s, height=aves/25*s, axis=[0, 1, 0], res=10) wpos = [-aves/40*s, y1-(len(vp.ytitle)+1)*aves/40*s, 0] if centered: wpos = [-aves / 40 * s, (y0 + y1) / 2 - len(vp.ytitle) / 2 * aves / 40 * s, 0] yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1), s=aves / 40 * s, c=ycol) yt.rotate(90, [0, 0, 1]).pos(wpos) acts += [yl, yc, yt] if len(vp.ztitle) and dz > aves/100: zl = shapes.Cylinder([[0, 0, z0], [0, 0, z1]], r=aves/250*s, c=zcol, alpha=alpha) zc = shapes.Cone(pos=[0, 0, z1], c=zcol, alpha=alpha, r=aves/100*s, height=aves/25*s, axis=[0, 0, 1], res=10) wpos = [-aves/50*s, -aves/50*s, z1 - (len(vp.ztitle)+1)*aves/40*s] if centered: wpos = [-aves/50*s, -aves/50*s, (z0+z1)/2-len(vp.ztitle)/2*aves/40*s] zt = 
shapes.Text(vp.ztitle, pos=(0,0,0), normal=(1, -1, 0), s=aves/40*s, c=zcol) zt.rotate(180, (1, -1, 0)).pos(wpos) acts += [zl, zc, zt] for a in acts: a.PickableOff() ass = Assembly(acts) ass.PickableOff() vp.renderer.AddActor(ass) vp.axes_exist[r] = ass elif vp.axes == 4: axact = vtk.vtkAxesActor() axact.SetShaftTypeToCylinder() axact.SetCylinderRadius(0.03) axact.SetXAxisLabelText(vp.xtitle) axact.SetYAxisLabelText(vp.ytitle) axact.SetZAxisLabelText(vp.ztitle) axact.GetXAxisShaftProperty().SetColor(0, 0, 1) axact.GetZAxisShaftProperty().SetColor(1, 0, 0) axact.GetXAxisTipProperty().SetColor(0, 0, 1) axact.GetZAxisTipProperty().SetColor(1, 0, 0) bc = numpy.array(vp.renderer.GetBackground()) if numpy.sum(bc) < 1.5: lc = (1, 1, 1) else: lc = (0, 0, 0) axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().BoldOff() axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().BoldOff() axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().BoldOff() axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff() axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff() axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff() axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff() axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff() axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff() axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc) axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc) axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc) axact.PickableOff() icn = addIcon(axact, size=0.1) vp.axes_exist[r] = icn elif vp.axes == 5: axact = vtk.vtkAnnotatedCubeActor() axact.GetCubeProperty().SetColor(0.75, 0.75, 0.75) axact.SetTextEdgesVisibility(0) axact.SetFaceTextScale(0.4) axact.GetXPlusFaceProperty().SetColor(colors.getColor("b")) axact.GetXMinusFaceProperty().SetColor(colors.getColor("db")) axact.GetYPlusFaceProperty().SetColor(colors.getColor("g")) 
axact.GetYMinusFaceProperty().SetColor(colors.getColor("dg")) axact.GetZPlusFaceProperty().SetColor(colors.getColor("r")) axact.GetZMinusFaceProperty().SetColor(colors.getColor("dr")) axact.PickableOff() icn = addIcon(axact, size=0.06) vp.axes_exist[r] = icn elif vp.axes == 6: ocf = vtk.vtkOutlineCornerFilter() ocf.SetCornerFactor(0.1) largestact, sz = None, -1 for a in vp.actors: if a.GetPickable(): b = a.GetBounds() d = max(b[1]-b[0], b[3]-b[2], b[5]-b[4]) if sz < d: largestact = a sz = d if isinstance(largestact, Assembly): ocf.SetInputData(largestact.getActor(0).GetMapper().GetInput()) else: ocf.SetInputData(largestact.polydata()) ocf.Update() ocMapper = vtk.vtkHierarchicalPolyDataMapper() ocMapper.SetInputConnection(0, ocf.GetOutputPort(0)) ocActor = vtk.vtkActor() ocActor.SetMapper(ocMapper) bc = numpy.array(vp.renderer.GetBackground()) if numpy.sum(bc) < 1.5: lc = (1, 1, 1) else: lc = (0, 0, 0) ocActor.GetProperty().SetColor(lc) ocActor.PickableOff() vp.renderer.AddActor(ocActor) vp.axes_exist[r] = ocActor elif vp.axes == 7: # draws a simple ruler at the bottom of the window ls = vtk.vtkLegendScaleActor() ls.RightAxisVisibilityOff() ls.TopAxisVisibilityOff() ls.LegendVisibilityOff() ls.LeftAxisVisibilityOff() ls.GetBottomAxis().SetNumberOfMinorTicks(1) ls.GetBottomAxis().GetProperty().SetColor(c) ls.GetBottomAxis().GetLabelTextProperty().SetColor(c) ls.GetBottomAxis().GetLabelTextProperty().BoldOff() ls.GetBottomAxis().GetLabelTextProperty().ItalicOff() ls.GetBottomAxis().GetLabelTextProperty().ShadowOff() ls.PickableOff() vp.renderer.AddActor(ls) vp.axes_exist[r] = ls elif vp.axes == 8: ca = vtk.vtkCubeAxesActor() ca.SetBounds(vbb) if vp.camera: ca.SetCamera(vp.camera) else: ca.SetCamera(vp.renderer.GetActiveCamera()) ca.GetXAxesLinesProperty().SetColor(c) ca.GetYAxesLinesProperty().SetColor(c) ca.GetZAxesLinesProperty().SetColor(c) for i in range(3): ca.GetLabelTextProperty(i).SetColor(c) ca.GetTitleTextProperty(i).SetColor(c) ca.SetTitleOffset(5) 
ca.SetFlyMode(3) ca.SetXTitle(vp.xtitle) ca.SetYTitle(vp.ytitle) ca.SetZTitle(vp.ztitle) if vp.xtitle == "": ca.SetXAxisVisibility(0) ca.XAxisLabelVisibilityOff() if vp.ytitle == "": ca.SetYAxisVisibility(0) ca.YAxisLabelVisibilityOff() if vp.ztitle == "": ca.SetZAxisVisibility(0) ca.ZAxisLabelVisibilityOff() ca.PickableOff() vp.renderer.AddActor(ca) vp.axes_exist[r] = ca return elif vp.axes == 9: src = vtk.vtkCubeSource() src.SetXLength(vbb[1] - vbb[0]) src.SetYLength(vbb[3] - vbb[2]) src.SetZLength(vbb[5] - vbb[4]) src.Update() ca = Actor(src.GetOutput(), c=c, alpha=0.5, wire=1) ca.pos((vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2) ca.PickableOff() vp.renderer.AddActor(ca) vp.axes_exist[r] = ca elif vp.axes == 10: x0 = (vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2 rx, ry, rz = (vbb[1]-vbb[0])/2, (vbb[3]-vbb[2])/2, (vbb[5]-vbb[4])/2 rm = max(rx, ry, rz) xc = shapes.Disc(x0, (0,0,1), r1=rm, r2=rm, c='lr', bc=None, res=1, resphi=72) yc = shapes.Disc(x0, (0,1,0), r1=rm, r2=rm, c='lg', bc=None, res=1, resphi=72) zc = shapes.Disc(x0, (1,0,0), r1=rm, r2=rm, c='lb', bc=None, res=1, resphi=72) xc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff() yc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff() zc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff() ca = xc + yc + zc ca.PickableOff() vp.renderer.AddActor(ca) vp.axes_exist[r] = ca else: colors.printc('~bomb Keyword axes must be in range [0-10].', c=1) colors.printc(''' ~target Available axes types: 0 = no axes, 1 = draw three gray grid walls 2 = show cartesian axes from (0,0,0) 3 = show positive range of cartesian axes from (0,0,0) 4 = show a triad at bottom left 5 = show a cube at bottom left 6 = mark the corners of the bounding box 7 = draw a simple ruler at the bottom of the window 8 = show the vtkCubeAxesActor object 9 = show the bounding box outline 10 = show three circles representing the maximum bounding box ''', c=1, bold=0) if not vp.axes_exist[r]: 
vp.axes_exist[r] = True return
[ "def", "addAxes", "(", "axtype", "=", "None", ",", "c", "=", "None", ")", ":", "vp", "=", "settings", ".", "plotter_instance", "if", "axtype", "is", "not", "None", ":", "vp", ".", "axes", "=", "axtype", "# overrride", "r", "=", "vp", ".", "renderers"...
Draw axes on scene. Available axes types: :param int axtype: - 0, no axes, - 1, draw three gray grid walls - 2, show cartesian axes from (0,0,0) - 3, show positive range of cartesian axes from (0,0,0) - 4, show a triad at bottom left - 5, show a cube at bottom left - 6, mark the corners of the bounding box - 7, draw a simple ruler at the bottom of the window - 8, show the ``vtkCubeAxesActor`` object - 9, show the bounding box outLine - 10, show three circles representing the maximum bounding box
[ "Draw", "axes", "on", "scene", ".", "Available", "axes", "types", ":" ]
python
train
40.244565
gwastro/pycbc
pycbc/pnutils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pnutils.py#L642-L645
def _meco_frequency(m1, m2, chi1, chi2): """Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin) """ return velocity_to_frequency(meco_velocity(m1, m2, chi1, chi2), m1+m2)
[ "def", "_meco_frequency", "(", "m1", ",", "m2", ",", "chi1", ",", "chi2", ")", ":", "return", "velocity_to_frequency", "(", "meco_velocity", "(", "m1", ",", "m2", ",", "chi1", ",", "chi2", ")", ",", "m1", "+", "m2", ")" ]
Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin)
[ "Returns", "the", "frequency", "of", "the", "minimum", "energy", "cutoff", "for", "3", ".", "5pN", "(", "2", ".", "5pN", "spin", ")" ]
python
train
50.25
BlueBrain/NeuroM
examples/plot_somas.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/plot_somas.py#L48-L54
def plot_somas(somas): '''Plot set of somas on same figure as spheres, each with different color''' _, ax = common.get_figure(new_fig=True, subplot=111, params={'projection': '3d', 'aspect': 'equal'}) for s in somas: common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1) plt.show()
[ "def", "plot_somas", "(", "somas", ")", ":", "_", ",", "ax", "=", "common", ".", "get_figure", "(", "new_fig", "=", "True", ",", "subplot", "=", "111", ",", "params", "=", "{", "'projection'", ":", "'3d'", ",", "'aspect'", ":", "'equal'", "}", ")", ...
Plot set of somas on same figure as spheres, each with different color
[ "Plot", "set", "of", "somas", "on", "same", "figure", "as", "spheres", "each", "with", "different", "color" ]
python
train
49.857143
rootpy/rootpy
rootpy/plotting/hist.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L777-L806
def underflow(self, axis=0): """ Return the underflow for the given axis. Depending on the dimension of the histogram, may return an array. """ if axis not in range(3): raise ValueError("axis must be 0, 1, or 2") if self.DIM == 1: return self.GetBinContent(0) elif self.DIM == 2: def idx(i): arg = [i] arg.insert(axis, 0) return arg return [ self.GetBinContent(*idx(i)) for i in self.bins_range(axis=(axis + 1) % 2, overflow=True)] elif self.DIM == 3: axes = [0, 1, 2] axes.remove(axis) axis2, axis3 = axes def idx(i, j): arg = [i, j] arg.insert(axis, 0) return arg return [[ self.GetBinContent(*idx(i, j)) for i in self.bins_range(axis=axis2, overflow=True)] for j in self.bins_range(axis=axis3, overflow=True)]
[ "def", "underflow", "(", "self", ",", "axis", "=", "0", ")", ":", "if", "axis", "not", "in", "range", "(", "3", ")", ":", "raise", "ValueError", "(", "\"axis must be 0, 1, or 2\"", ")", "if", "self", ".", "DIM", "==", "1", ":", "return", "self", ".",...
Return the underflow for the given axis. Depending on the dimension of the histogram, may return an array.
[ "Return", "the", "underflow", "for", "the", "given", "axis", "." ]
python
train
34.4
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/027a270febf5bcda6a75db60ea9838d631370f4b/mechanicalsoup/browser.py#L58-L63
def __looks_like_html(response): """Guesses entity type when Content-Type header is missing. Since Content-Type is not strictly required, some servers leave it out. """ text = response.text.lstrip().lower() return text.startswith('<html') or text.startswith('<!doctype')
[ "def", "__looks_like_html", "(", "response", ")", ":", "text", "=", "response", ".", "text", ".", "lstrip", "(", ")", ".", "lower", "(", ")", "return", "text", ".", "startswith", "(", "'<html'", ")", "or", "text", ".", "startswith", "(", "'<!doctype'", ...
Guesses entity type when Content-Type header is missing. Since Content-Type is not strictly required, some servers leave it out.
[ "Guesses", "entity", "type", "when", "Content", "-", "Type", "header", "is", "missing", ".", "Since", "Content", "-", "Type", "is", "not", "strictly", "required", "some", "servers", "leave", "it", "out", "." ]
python
train
50.833333
KnorrFG/pyparadigm
pyparadigm/misc.py
https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/misc.py#L135-L160
def process_char(buffer: str, char: str, mappings=_char_mappings): """This is a convinience method for use with EventListener.wait_for_unicode_char(). In most cases it simply appends char to buffer. Some replacements are done because presing return will produce '\\r' but for most cases '\\n' would be desireable. Also backspace cant just be added to a string either, therefore, if char is "\\u0008" the last character from buffer will be cut off. The replacement from '\\r' to '\\n' is done using the mappings argument, the default value for it also contains a mapping from '\t' to 4 spaces. :param buffer: the string to be updated :type buffer: str :param char: the unicode character to be processed :type char: str :param mappings: a dict containing mappings :type mappings: dict :returns: a new string""" if char in mappings: return buffer + mappings[char] elif char == "\u0008": return buffer[:-1] if len(buffer) > 0 else buffer else: return buffer + char
[ "def", "process_char", "(", "buffer", ":", "str", ",", "char", ":", "str", ",", "mappings", "=", "_char_mappings", ")", ":", "if", "char", "in", "mappings", ":", "return", "buffer", "+", "mappings", "[", "char", "]", "elif", "char", "==", "\"\\u0008\"", ...
This is a convinience method for use with EventListener.wait_for_unicode_char(). In most cases it simply appends char to buffer. Some replacements are done because presing return will produce '\\r' but for most cases '\\n' would be desireable. Also backspace cant just be added to a string either, therefore, if char is "\\u0008" the last character from buffer will be cut off. The replacement from '\\r' to '\\n' is done using the mappings argument, the default value for it also contains a mapping from '\t' to 4 spaces. :param buffer: the string to be updated :type buffer: str :param char: the unicode character to be processed :type char: str :param mappings: a dict containing mappings :type mappings: dict :returns: a new string
[ "This", "is", "a", "convinience", "method", "for", "use", "with", "EventListener", ".", "wait_for_unicode_char", "()", ".", "In", "most", "cases", "it", "simply", "appends", "char", "to", "buffer", ".", "Some", "replacements", "are", "done", "because", "presin...
python
train
39.884615
django-json-api/django-rest-framework-json-api
rest_framework_json_api/filters.py
https://github.com/django-json-api/django-rest-framework-json-api/blob/de7021f9e011615ce8b65d0cb38227c6c12721b6/rest_framework_json_api/filters.py#L25-L58
def remove_invalid_fields(self, queryset, fields, view, request): """ Extend :py:meth:`rest_framework.filters.OrderingFilter.remove_invalid_fields` to validate that all provided sort fields exist (as contrasted with the super's behavior which is to silently remove invalid fields). :raises ValidationError: if a sort field is invalid. """ valid_fields = [ item[0] for item in self.get_valid_fields(queryset, view, {'request': request}) ] bad_terms = [ term for term in fields if format_value(term.replace(".", "__").lstrip('-'), "underscore") not in valid_fields ] if bad_terms: raise ValidationError('invalid sort parameter{}: {}'.format( ('s' if len(bad_terms) > 1 else ''), ','.join(bad_terms))) # this looks like it duplicates code above, but we want the ValidationError to report # the actual parameter supplied while we want the fields passed to the super() to # be correctly rewritten. # The leading `-` has to be stripped to prevent format_value from turning it into `_`. underscore_fields = [] for item in fields: item_rewritten = item.replace(".", "__") if item_rewritten.startswith('-'): underscore_fields.append( '-' + format_value(item_rewritten.lstrip('-'), "underscore")) else: underscore_fields.append(format_value(item_rewritten, "underscore")) return super(OrderingFilter, self).remove_invalid_fields( queryset, underscore_fields, view, request)
[ "def", "remove_invalid_fields", "(", "self", ",", "queryset", ",", "fields", ",", "view", ",", "request", ")", ":", "valid_fields", "=", "[", "item", "[", "0", "]", "for", "item", "in", "self", ".", "get_valid_fields", "(", "queryset", ",", "view", ",", ...
Extend :py:meth:`rest_framework.filters.OrderingFilter.remove_invalid_fields` to validate that all provided sort fields exist (as contrasted with the super's behavior which is to silently remove invalid fields). :raises ValidationError: if a sort field is invalid.
[ "Extend", ":", "py", ":", "meth", ":", "rest_framework", ".", "filters", ".", "OrderingFilter", ".", "remove_invalid_fields", "to", "validate", "that", "all", "provided", "sort", "fields", "exist", "(", "as", "contrasted", "with", "the", "super", "s", "behavio...
python
train
49.823529
saltstack/salt
salt/utils/args.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/args.py#L288-L301
def shlex_split(s, **kwargs): ''' Only split if variable is a string ''' if isinstance(s, six.string_types): # On PY2, shlex.split will fail with unicode types if there are # non-ascii characters in the string. So, we need to make sure we # invoke it with a str type, and then decode the resulting string back # to unicode to return it. return salt.utils.data.decode( shlex.split(salt.utils.stringutils.to_str(s), **kwargs) ) else: return s
[ "def", "shlex_split", "(", "s", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "s", ",", "six", ".", "string_types", ")", ":", "# On PY2, shlex.split will fail with unicode types if there are", "# non-ascii characters in the string. So, we need to make sure we"...
Only split if variable is a string
[ "Only", "split", "if", "variable", "is", "a", "string" ]
python
train
36.785714
googleads/googleads-python-lib
googleads/adwords.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/adwords.py#L1940-L1951
def StartsWithIgnoreCase(self, value): """Sets the type of the WHERE clause as "starts with ignore case". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to. """ self._awql = self._CreateSingleValueCondition(value, 'STARTS_WITH_IGNORE_CASE') return self._query_builder
[ "def", "StartsWithIgnoreCase", "(", "self", ",", "value", ")", ":", "self", ".", "_awql", "=", "self", ".", "_CreateSingleValueCondition", "(", "value", ",", "'STARTS_WITH_IGNORE_CASE'", ")", "return", "self", ".", "_query_builder" ]
Sets the type of the WHERE clause as "starts with ignore case". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
[ "Sets", "the", "type", "of", "the", "WHERE", "clause", "as", "starts", "with", "ignore", "case", "." ]
python
train
34.333333
klen/muffin
muffin/app.py
https://github.com/klen/muffin/blob/7bc891e174e08b62d1ae232b5d45f8cd8bc82112/muffin/app.py#L266-L284
def _exc_middleware_factory(app): """Handle exceptions. Route exceptions to handlers if they are registered in application. """ @web.middleware async def middleware(request, handler): try: return await handler(request) except Exception as exc: for cls in type(exc).mro(): if cls in app._error_handlers: request.exception = exc response = await app._error_handlers[cls](request) return response raise return middleware
[ "def", "_exc_middleware_factory", "(", "app", ")", ":", "@", "web", ".", "middleware", "async", "def", "middleware", "(", "request", ",", "handler", ")", ":", "try", ":", "return", "await", "handler", "(", "request", ")", "except", "Exception", "as", "exc"...
Handle exceptions. Route exceptions to handlers if they are registered in application.
[ "Handle", "exceptions", "." ]
python
train
29.157895
BenjaminSchubert/NitPycker
nitpycker/result.py
https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/result.py#L72-L79
def addSuccess(self, test: unittest.case.TestCase) -> None: """ Transforms the test in a serializable version of it and sends it to a queue for further analysis :param test: the test to save """ # noinspection PyTypeChecker self.add_result(TestState.success, test)
[ "def", "addSuccess", "(", "self", ",", "test", ":", "unittest", ".", "case", ".", "TestCase", ")", "->", "None", ":", "# noinspection PyTypeChecker", "self", ".", "add_result", "(", "TestState", ".", "success", ",", "test", ")" ]
Transforms the test in a serializable version of it and sends it to a queue for further analysis :param test: the test to save
[ "Transforms", "the", "test", "in", "a", "serializable", "version", "of", "it", "and", "sends", "it", "to", "a", "queue", "for", "further", "analysis" ]
python
train
38.25
modin-project/modin
modin/pandas/base.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1643-L1657
def mul(self, other, axis="columns", level=None, fill_value=None): """Multiplies this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the multiply against this. axis: The axis to multiply over. level: The Multilevel index level to apply multiply over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Multiply applied. """ return self._binary_op( "mul", other, axis=axis, level=level, fill_value=fill_value )
[ "def", "mul", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"mul\"", ",", "other", ",", "axis", "=", "axis", ",", "level", ...
Multiplies this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the multiply against this. axis: The axis to multiply over. level: The Multilevel index level to apply multiply over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Multiply applied.
[ "Multiplies", "this", "DataFrame", "against", "another", "DataFrame", "/", "Series", "/", "scalar", ".", "Args", ":", "other", ":", "The", "object", "to", "use", "to", "apply", "the", "multiply", "against", "this", ".", "axis", ":", "The", "axis", "to", ...
python
train
39.933333
acorg/dark-matter
dark/fasta.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/fasta.py#L57-L81
def fastaSubtract(fastaFiles): """ Given a list of open file descriptors, each with FASTA content, remove the reads found in the 2nd, 3rd, etc files from the first file in the list. @param fastaFiles: a C{list} of FASTA filenames. @raises IndexError: if passed an empty list. @return: An iterator producing C{Bio.SeqRecord} instances suitable for writing to a file using C{Bio.SeqIO.write}. """ reads = {} firstFile = fastaFiles.pop(0) for seq in SeqIO.parse(firstFile, 'fasta'): reads[seq.id] = seq for fastaFile in fastaFiles: for seq in SeqIO.parse(fastaFile, 'fasta'): # Make sure that reads with the same id have the same sequence. if seq.id in reads: assert str(seq.seq) == str(reads[seq.id].seq) reads.pop(seq.id, None) return iter(reads.values())
[ "def", "fastaSubtract", "(", "fastaFiles", ")", ":", "reads", "=", "{", "}", "firstFile", "=", "fastaFiles", ".", "pop", "(", "0", ")", "for", "seq", "in", "SeqIO", ".", "parse", "(", "firstFile", ",", "'fasta'", ")", ":", "reads", "[", "seq", ".", ...
Given a list of open file descriptors, each with FASTA content, remove the reads found in the 2nd, 3rd, etc files from the first file in the list. @param fastaFiles: a C{list} of FASTA filenames. @raises IndexError: if passed an empty list. @return: An iterator producing C{Bio.SeqRecord} instances suitable for writing to a file using C{Bio.SeqIO.write}.
[ "Given", "a", "list", "of", "open", "file", "descriptors", "each", "with", "FASTA", "content", "remove", "the", "reads", "found", "in", "the", "2nd", "3rd", "etc", "files", "from", "the", "first", "file", "in", "the", "list", "." ]
python
train
34.48
samuraisam/django-json-rpc
jsonrpc/__init__.py
https://github.com/samuraisam/django-json-rpc/blob/a88d744d960e828f3eb21265da0f10a694b8ebcf/jsonrpc/__init__.py#L69-L117
def _parse_sig(sig, arg_names, validate=False): """ Parses signatures into a ``OrderedDict`` of paramName => type. Numerically-indexed arguments that do not correspond to an argument name in python (ie: it takes a variable number of arguments) will be keyed as the stringified version of it's index. sig the signature to be parsed arg_names a list of argument names extracted from python source Returns a tuple of (method name, types dict, return type) """ d = SIG_RE.match(sig) if not d: raise ValueError('Invalid method signature %s' % sig) d = d.groupdict() ret = [(n, Any) for n in arg_names] if 'args_sig' in d and type( d['args_sig']) is str and d['args_sig'].strip(): for i, arg in enumerate(d['args_sig'].strip().split(',')): _type_checking_available(sig, validate) if '=' in arg: if not type(ret) is OrderedDict: ret = OrderedDict(ret) dk = KWARG_RE.match(arg) if not dk: raise ValueError('Could not parse arg type %s in %s' % (arg, sig)) dk = dk.groupdict() if not sum( [(k in dk and type(dk[k]) is str and bool(dk[k].strip())) for k in ('arg_name', 'arg_type')]): raise ValueError('Invalid kwarg value %s in %s' % (arg, sig)) ret[dk['arg_name']] = _eval_arg_type(dk['arg_type'], None, arg, sig) else: if type(ret) is OrderedDict: raise ValueError('Positional arguments must occur ' 'before keyword arguments in %s' % sig) if len(ret) < i + 1: ret.append((str(i), _eval_arg_type(arg, None, arg, sig))) else: ret[i] = (ret[i][0], _eval_arg_type(arg, None, arg, sig)) if not type(ret) is OrderedDict: ret = OrderedDict(ret) return (d['method_name'], ret, (_eval_arg_type(d['return_sig'], Any, 'return', sig) if d['return_sig'] else Any))
[ "def", "_parse_sig", "(", "sig", ",", "arg_names", ",", "validate", "=", "False", ")", ":", "d", "=", "SIG_RE", ".", "match", "(", "sig", ")", "if", "not", "d", ":", "raise", "ValueError", "(", "'Invalid method signature %s'", "%", "sig", ")", "d", "="...
Parses signatures into a ``OrderedDict`` of paramName => type. Numerically-indexed arguments that do not correspond to an argument name in python (ie: it takes a variable number of arguments) will be keyed as the stringified version of it's index. sig the signature to be parsed arg_names a list of argument names extracted from python source Returns a tuple of (method name, types dict, return type)
[ "Parses", "signatures", "into", "a", "OrderedDict", "of", "paramName", "=", ">", "type", ".", "Numerically", "-", "indexed", "arguments", "that", "do", "not", "correspond", "to", "an", "argument", "name", "in", "python", "(", "ie", ":", "it", "takes", "a",...
python
train
45.857143
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/FS.py#L1345-L1355
def File(self, name, directory = None, create = 1): """Look up or create a File node with the specified name. If the name is a relative path (begins with ./, ../, or a file name), then it is looked up relative to the supplied directory node, or to the top level directory of the FS (supplied at construction time) if no directory is supplied. This method will raise TypeError if a directory is found at the specified path. """ return self._lookup(name, directory, File, create)
[ "def", "File", "(", "self", ",", "name", ",", "directory", "=", "None", ",", "create", "=", "1", ")", ":", "return", "self", ".", "_lookup", "(", "name", ",", "directory", ",", "File", ",", "create", ")" ]
Look up or create a File node with the specified name. If the name is a relative path (begins with ./, ../, or a file name), then it is looked up relative to the supplied directory node, or to the top level directory of the FS (supplied at construction time) if no directory is supplied. This method will raise TypeError if a directory is found at the specified path.
[ "Look", "up", "or", "create", "a", "File", "node", "with", "the", "specified", "name", ".", "If", "the", "name", "is", "a", "relative", "path", "(", "begins", "with", ".", "/", "..", "/", "or", "a", "file", "name", ")", "then", "it", "is", "looked"...
python
train
49.181818
uw-it-aca/uw-restclients-canvas
uw_canvas/admins.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/admins.py#L28-L39
def create_admin(self, account_id, user_id, role): """ Flag an existing user as an admin within the account. https://canvas.instructure.com/doc/api/admins.html#method.admins.create """ url = ADMINS_API.format(account_id) body = {"user_id": unquote(str(user_id)), "role": role, "send_confirmation": False} return CanvasAdmin(data=self._post_resource(url, body))
[ "def", "create_admin", "(", "self", ",", "account_id", ",", "user_id", ",", "role", ")", ":", "url", "=", "ADMINS_API", ".", "format", "(", "account_id", ")", "body", "=", "{", "\"user_id\"", ":", "unquote", "(", "str", "(", "user_id", ")", ")", ",", ...
Flag an existing user as an admin within the account. https://canvas.instructure.com/doc/api/admins.html#method.admins.create
[ "Flag", "an", "existing", "user", "as", "an", "admin", "within", "the", "account", "." ]
python
test
36.583333
toomore/grs
grs/best_buy_or_sell.py
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/best_buy_or_sell.py#L50-L57
def best_buy_1(self): """ 量大收紅 :rtype: bool """ result = self.data.value[-1] > self.data.value[-2] and \ self.data.price[-1] > self.data.openprice[-1] return result
[ "def", "best_buy_1", "(", "self", ")", ":", "result", "=", "self", ".", "data", ".", "value", "[", "-", "1", "]", ">", "self", ".", "data", ".", "value", "[", "-", "2", "]", "and", "self", ".", "data", ".", "price", "[", "-", "1", "]", ">", ...
量大收紅 :rtype: bool
[ "量大收紅" ]
python
train
27.375
googledatalab/pydatalab
solutionbox/ml_workbench/tensorflow/transform.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/transform.py#L431-L506
def preprocess(pipeline, args): """Transfrom csv data into transfromed tf.example files. Outline: 1) read the input data (as csv or bigquery) into a dict format 2) replace image paths with base64 encoded image files 3) build a csv input string with images paths replaced with base64. This matches the serving csv that a trained model would expect. 4) batch the csv strings 5) run the transformations 6) write the results to tf.example files and save any errors. """ from tensorflow.python.lib.io import file_io from trainer import feature_transforms schema = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode()) features = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode()) stats = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode()) column_names = [col['name'] for col in schema] if args.csv: all_files = [] for i, file_pattern in enumerate(args.csv): all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern)) raw_data = ( all_files | 'MergeCSVFiles' >> beam.Flatten() | 'ParseCSVData' >> beam.Map(decode_csv, column_names)) else: columns = ', '.join(column_names) query = 'SELECT {columns} FROM `{table}`'.format(columns=columns, table=args.bigquery) raw_data = ( pipeline | 'ReadBiqQueryData' >> beam.io.Read(beam.io.BigQuerySource(query=query, use_standard_sql=True))) # Note that prepare_image_transforms does not make embeddings, it justs reads # the image files and converts them to byte stings. TransformFeaturesDoFn() # will make the image embeddings. 
image_columns = image_transform_columns(features) clean_csv_data = ( raw_data | 'PreprocessTransferredLearningTransformations' >> beam.Map(prepare_image_transforms, image_columns) | 'BuildCSVString' >> beam.Map(encode_csv, column_names)) if args.shuffle: clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle() transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats) (transformed_data, errors) = ( clean_csv_data | 'Batch Input' >> beam.ParDo(EmitAsBatchDoFn(args.batch_size)) | 'Run TF Graph on Batches' >> beam.ParDo(transform_dofn).with_outputs('errors', main='main')) _ = (transformed_data | 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema)) | 'WriteExamples' >> beam.io.WriteToTFRecord( os.path.join(args.output, args.prefix), file_name_suffix='.tfrecord.gz')) _ = (errors | 'WriteErrors' >> beam.io.WriteToText( os.path.join(args.output, 'errors_' + args.prefix), file_name_suffix='.txt'))
[ "def", "preprocess", "(", "pipeline", ",", "args", ")", ":", "from", "tensorflow", ".", "python", ".", "lib", ".", "io", "import", "file_io", "from", "trainer", "import", "feature_transforms", "schema", "=", "json", ".", "loads", "(", "file_io", ".", "read...
Transfrom csv data into transfromed tf.example files. Outline: 1) read the input data (as csv or bigquery) into a dict format 2) replace image paths with base64 encoded image files 3) build a csv input string with images paths replaced with base64. This matches the serving csv that a trained model would expect. 4) batch the csv strings 5) run the transformations 6) write the results to tf.example files and save any errors.
[ "Transfrom", "csv", "data", "into", "transfromed", "tf", ".", "example", "files", "." ]
python
train
39.776316
eumis/pyviews
pyviews/core/observable.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L110-L112
def observe_all(self, callback: Callable[[str, Any, Any], None]): """Subscribes to all keys changes""" self._all_callbacks.append(callback)
[ "def", "observe_all", "(", "self", ",", "callback", ":", "Callable", "[", "[", "str", ",", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "self", ".", "_all_callbacks", ".", "append", "(", "callback", ")" ]
Subscribes to all keys changes
[ "Subscribes", "to", "all", "keys", "changes" ]
python
train
51
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L496-L539
def _execute_expression(self, expression: Any): """ This does the bulk of the work of executing a logical form, recursively executing a single expression. Basically, if the expression is a function we know about, we evaluate its arguments then call the function. If it's a list, we evaluate all elements of the list. If it's a constant (or a zero-argument function), we evaluate the constant. """ # pylint: disable=too-many-return-statements if isinstance(expression, list): if isinstance(expression[0], list): function = self._execute_expression(expression[0]) elif expression[0] in self._functions: function = self._functions[expression[0]] else: if isinstance(expression[0], str): raise ExecutionError(f"Unrecognized function: {expression[0]}") else: raise ExecutionError(f"Unsupported expression type: {expression}") arguments = [self._execute_expression(arg) for arg in expression[1:]] try: return function(*arguments) except (TypeError, ValueError): traceback.print_exc() raise ExecutionError(f"Error executing expression {expression} (see stderr for stack trace)") elif isinstance(expression, str): if expression not in self._functions: raise ExecutionError(f"Unrecognized constant: {expression}") # This is a bit of a quirk in how we represent constants and zero-argument functions. # For consistency, constants are wrapped in a zero-argument lambda. So both constants # and zero-argument functions are callable in `self._functions`, and are `BasicTypes` # in `self._function_types`. For these, we want to return # `self._functions[expression]()` _calling_ the zero-argument function. If we get a # `FunctionType` in here, that means we're referring to the function as a first-class # object, instead of calling it (maybe as an argument to a higher-order function). In # that case, we return the function _without_ calling it. # Also, we just check the first function type here, because we assume you haven't # registered the same function with both a constant type and a `FunctionType`. 
if isinstance(self._function_types[expression][0], FunctionType): return self._functions[expression] else: return self._functions[expression]() return self._functions[expression] else: raise ExecutionError("Not sure how you got here. Please open a github issue with details.")
[ "def", "_execute_expression", "(", "self", ",", "expression", ":", "Any", ")", ":", "# pylint: disable=too-many-return-statements", "if", "isinstance", "(", "expression", ",", "list", ")", ":", "if", "isinstance", "(", "expression", "[", "0", "]", ",", "list", ...
This does the bulk of the work of executing a logical form, recursively executing a single expression. Basically, if the expression is a function we know about, we evaluate its arguments then call the function. If it's a list, we evaluate all elements of the list. If it's a constant (or a zero-argument function), we evaluate the constant.
[ "This", "does", "the", "bulk", "of", "the", "work", "of", "executing", "a", "logical", "form", "recursively", "executing", "a", "single", "expression", ".", "Basically", "if", "the", "expression", "is", "a", "function", "we", "know", "about", "we", "evaluate...
python
train
62.886364
futapi/fut
fut/core.py
https://github.com/futapi/fut/blob/3792c9eee8f5884f38a02210e649c46c6c7a756d/fut/core.py#L949-L960
def cardInfo(self, resource_id): """Return card info. :params resource_id: Resource id. """ # TODO: add referer to headers (futweb) base_id = baseId(resource_id) if base_id in self.players: return self.players[base_id] else: # not a player? url = '{0}{1}.json'.format(card_info_url, base_id) return requests.get(url, timeout=self.timeout).json()
[ "def", "cardInfo", "(", "self", ",", "resource_id", ")", ":", "# TODO: add referer to headers (futweb)", "base_id", "=", "baseId", "(", "resource_id", ")", "if", "base_id", "in", "self", ".", "players", ":", "return", "self", ".", "players", "[", "base_id", "]...
Return card info. :params resource_id: Resource id.
[ "Return", "card", "info", "." ]
python
valid
35.666667
prompt-toolkit/ptpython
ptpython/history_browser.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/history_browser.py#L563-L580
def _default_buffer_pos_changed(self, _): """ When the cursor changes in the default buffer. Synchronize with history buffer. """ # Only when this buffer has the focus. if self.app.current_buffer == self.default_buffer: try: line_no = self.default_buffer.document.cursor_position_row - \ self.history_mapping.result_line_offset if line_no < 0: # When the cursor is above the inserted region. raise IndexError history_lineno = sorted(self.history_mapping.selected_lines)[line_no] except IndexError: pass else: self.history_buffer.cursor_position = \ self.history_buffer.document.translate_row_col_to_index(history_lineno, 0)
[ "def", "_default_buffer_pos_changed", "(", "self", ",", "_", ")", ":", "# Only when this buffer has the focus.", "if", "self", ".", "app", ".", "current_buffer", "==", "self", ".", "default_buffer", ":", "try", ":", "line_no", "=", "self", ".", "default_buffer", ...
When the cursor changes in the default buffer. Synchronize with history buffer.
[ "When", "the", "cursor", "changes", "in", "the", "default", "buffer", ".", "Synchronize", "with", "history", "buffer", "." ]
python
train
45.388889
geronimp/graftM
graftm/sequence_searcher.py
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L509-L567
def alignment_correcter(self, alignment_file_list, output_file_name, filter_minimum=None): ''' Remove lower case insertions in alignment outputs from HMM align. Give a list of alignments, and an output file name, and each alignment will be corrected, and written to a single file, ready to be placed together using pplacer. Parameters ---------- alignment_file_list : array List of strings, each the path to different alignments from the inputs provided to GraftM output_file_name : str The path and filename of the output file desired. filter_minimum : int minimum number of positions that must be aligned for each sequence Returns ------- True or False, depending if reads were written to file ''' corrected_sequences = {} for alignment_file in alignment_file_list: insert_list = [] # Define list containing inserted positions to be removed (lower case characters) sequence_list = list(SeqIO.parse(open(alignment_file, 'r'), 'fasta')) for sequence in sequence_list: # For each sequence in the alignment for idx, nt in enumerate(list(sequence.seq)): # For each nucleotide in the sequence if nt.islower(): # Check for lower case character insert_list.append(idx) # Add to the insert list if it is insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True))) # Reverse the list and remove duplicate positions for sequence in sequence_list: # For each sequence in the alignment new_seq = list(sequence.seq) # Define a list of sequences to be iterable list for writing for position in insert_list: # For each position in the removal list del new_seq[position] # Delete that inserted position in every sequence corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-') pre_filter_count=len(corrected_sequences) if filter_minimum: # Use '>' not '>=' here because the sequence is on a single line, # but also includes a newline character at the end of the sequence corrected_sequences={key:item for key, item in corrected_sequences.iteritems() if 
len(item.replace('-', '')) > filter_minimum} post_filter_count=len(corrected_sequences) logging.info("Filtered %i short sequences from the alignment" % \ (pre_filter_count-post_filter_count) ) logging.info("%i sequences remaining" % post_filter_count) if len(corrected_sequences) >= 1: with open(output_file_name, 'w') as output_file: # Create an open file to write the new sequences to for fasta_id, fasta_seq in corrected_sequences.iteritems(): output_file.write(fasta_id) output_file.write(fasta_seq) return True else: return False
[ "def", "alignment_correcter", "(", "self", ",", "alignment_file_list", ",", "output_file_name", ",", "filter_minimum", "=", "None", ")", ":", "corrected_sequences", "=", "{", "}", "for", "alignment_file", "in", "alignment_file_list", ":", "insert_list", "=", "[", ...
Remove lower case insertions in alignment outputs from HMM align. Give a list of alignments, and an output file name, and each alignment will be corrected, and written to a single file, ready to be placed together using pplacer. Parameters ---------- alignment_file_list : array List of strings, each the path to different alignments from the inputs provided to GraftM output_file_name : str The path and filename of the output file desired. filter_minimum : int minimum number of positions that must be aligned for each sequence Returns ------- True or False, depending if reads were written to file
[ "Remove", "lower", "case", "insertions", "in", "alignment", "outputs", "from", "HMM", "align", ".", "Give", "a", "list", "of", "alignments", "and", "an", "output", "file", "name", "and", "each", "alignment", "will", "be", "corrected", "and", "written", "to",...
python
train
53.169492
TomasTomecek/sen
sen/util.py
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/util.py#L60-L81
def humanize_bytes(bytesize, precision=2): """ Humanize byte size figures https://gist.github.com/moird/3684595 """ abbrevs = ( (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes') ) if bytesize == 1: return '1 byte' for factor, suffix in abbrevs: if bytesize >= factor: break if factor == 1: precision = 0 return '%.*f %s' % (precision, bytesize / float(factor), suffix)
[ "def", "humanize_bytes", "(", "bytesize", ",", "precision", "=", "2", ")", ":", "abbrevs", "=", "(", "(", "1", "<<", "50", ",", "'PB'", ")", ",", "(", "1", "<<", "40", ",", "'TB'", ")", ",", "(", "1", "<<", "30", ",", "'GB'", ")", ",", "(", ...
Humanize byte size figures https://gist.github.com/moird/3684595
[ "Humanize", "byte", "size", "figures" ]
python
train
23.636364
lowandrew/OLCTools
coreGenome/core.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/core.py#L205-L231
def blastparser(self, report, sample, fieldnames): """ Parse the number of core genes present in the strain from the BLAST outputs :param report: the name and path of the BLAST outputs :param sample: the sample object :param fieldnames: type LIST: List of fields used to in BLAST analyses """ try: # Open the sequence profile file as a dictionary blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab') # Go through each BLAST result for row in blastdict: # Ignore the headers if row['query_id'].startswith(fieldnames[0]): pass else: # Calculate the percent identity and extract the bitscore from the row # Percent identity is the (length of the alignment - number of mismatches) / total subject length percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) / float(row['subject_length']) * 100)) # Split off any | and - from the sample name target = row['subject_id'].split('|')[0].split('-')[0] # If the hit passes the cutoff threshold, add it to the set of core genes present if percentidentity >= self.cutoff: sample[self.analysistype].coreset.add(target) except FileNotFoundError: pass
[ "def", "blastparser", "(", "self", ",", "report", ",", "sample", ",", "fieldnames", ")", ":", "try", ":", "# Open the sequence profile file as a dictionary", "blastdict", "=", "DictReader", "(", "open", "(", "report", ")", ",", "fieldnames", "=", "self", ".", ...
Parse the number of core genes present in the strain from the BLAST outputs :param report: the name and path of the BLAST outputs :param sample: the sample object :param fieldnames: type LIST: List of fields used to in BLAST analyses
[ "Parse", "the", "number", "of", "core", "genes", "present", "in", "the", "strain", "from", "the", "BLAST", "outputs", ":", "param", "report", ":", "the", "name", "and", "path", "of", "the", "BLAST", "outputs", ":", "param", "sample", ":", "the", "sample"...
python
train
57.185185
quodlibet/mutagen
mutagen/aac.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/aac.py#L41-L52
def find_stream(cls, fileobj, max_bytes): """Returns a possibly valid _ADTSStream or None. Args: max_bytes (int): maximum bytes to read """ r = BitReader(fileobj) stream = cls(r) if stream.sync(max_bytes): stream.offset = (r.get_position() - 12) // 8 return stream
[ "def", "find_stream", "(", "cls", ",", "fileobj", ",", "max_bytes", ")", ":", "r", "=", "BitReader", "(", "fileobj", ")", "stream", "=", "cls", "(", "r", ")", "if", "stream", ".", "sync", "(", "max_bytes", ")", ":", "stream", ".", "offset", "=", "(...
Returns a possibly valid _ADTSStream or None. Args: max_bytes (int): maximum bytes to read
[ "Returns", "a", "possibly", "valid", "_ADTSStream", "or", "None", "." ]
python
train
28.25
wndhydrnt/python-oauth2
oauth2/client_authenticator.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/client_authenticator.py#L61-L93
def by_identifier_secret(self, request): """ Authenticates a client by its identifier and secret (aka password). :param request: The incoming request :type request: oauth2.web.Request :return: The identified client :rtype: oauth2.datatype.Client :raises OAuthInvalidError: If the client could not be found, is not allowed to to use the current grant or supplied invalid credentials """ client_id, client_secret = self.source(request=request) try: client = self.client_store.fetch_by_client_id(client_id) except ClientNotFoundError: raise OAuthInvalidError(error="invalid_client", explanation="No client could be found") grant_type = request.post_param("grant_type") if client.grant_type_supported(grant_type) is False: raise OAuthInvalidError(error="unauthorized_client", explanation="The client is not allowed " "to use this grant type") if client.secret != client_secret: raise OAuthInvalidError(error="invalid_client", explanation="Invalid client credentials") return client
[ "def", "by_identifier_secret", "(", "self", ",", "request", ")", ":", "client_id", ",", "client_secret", "=", "self", ".", "source", "(", "request", "=", "request", ")", "try", ":", "client", "=", "self", ".", "client_store", ".", "fetch_by_client_id", "(", ...
Authenticates a client by its identifier and secret (aka password). :param request: The incoming request :type request: oauth2.web.Request :return: The identified client :rtype: oauth2.datatype.Client :raises OAuthInvalidError: If the client could not be found, is not allowed to to use the current grant or supplied invalid credentials
[ "Authenticates", "a", "client", "by", "its", "identifier", "and", "secret", "(", "aka", "password", ")", "." ]
python
train
40.787879