repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
knowmalware/camcrypt
camcrypt/__init__.py
CamCrypt.encrypt_block
def encrypt_block(self, plainText):
    """Encrypt exactly one 16-byte block with the prepared key table.

    NOTE: This function was formerly called `encrypt`, but was changed
    when support for encrypting arbitrary-length strings was added.

    Args:
        plainText (str): exactly BLOCK_SIZE (16) bytes of data.

    Returns:
        The 16-byte ciphertext as a str.

    Raises:
        TypeError: if the CamCrypt object has not been initialized.
        ValueError: if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.
    """
    if not self.initialized:
        raise TypeError("CamCrypt object has not been initialized")
    block_len = len(plainText)
    if block_len != BLOCK_SIZE:
        raise ValueError(
            "plainText must be %d bytes long (received %d bytes)"
            % (BLOCK_SIZE, block_len))
    # The C library writes its output into a caller-supplied buffer.
    out_buf = ctypes.create_string_buffer(BLOCK_SIZE)
    self.encblock(self.bitlen, plainText, self.keytable, out_buf)
    return out_buf.raw
python
def encrypt_block(self, plainText): """Encrypt a 16-byte block of data. NOTE: This function was formerly called `encrypt`, but was changed when support for encrypting arbitrary-length strings was added. Args: plainText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes. """ if not self.initialized: raise TypeError("CamCrypt object has not been initialized") if len(plainText) != BLOCK_SIZE: raise ValueError("plainText must be %d bytes long (received %d bytes)" % (BLOCK_SIZE, len(plainText))) cipher = ctypes.create_string_buffer(BLOCK_SIZE) self.encblock(self.bitlen, plainText, self.keytable, cipher) return cipher.raw
[ "def", "encrypt_block", "(", "self", ",", "plainText", ")", ":", "if", "not", "self", ".", "initialized", ":", "raise", "TypeError", "(", "\"CamCrypt object has not been initialized\"", ")", "if", "len", "(", "plainText", ")", "!=", "BLOCK_SIZE", ":", "raise", ...
Encrypt a 16-byte block of data. NOTE: This function was formerly called `encrypt`, but was changed when support for encrypting arbitrary-length strings was added. Args: plainText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.
[ "Encrypt", "a", "16", "-", "byte", "block", "of", "data", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L125-L148
knowmalware/camcrypt
camcrypt/__init__.py
CamCrypt.decrypt_block
def decrypt_block(self, cipherText):
    """Decrypt exactly one 16-byte block with the prepared key table.

    NOTE: This function was formerly called `decrypt`, but was changed
    when support for decrypting arbitrary-length strings was added.

    Args:
        cipherText (str): exactly BLOCK_SIZE (16) bytes of data.

    Returns:
        The 16-byte plaintext as a str.

    Raises:
        TypeError: if the CamCrypt object has not been initialized.
        ValueError: if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.
    """
    if not self.initialized:
        raise TypeError("CamCrypt object has not been initialized")
    block_len = len(cipherText)
    if block_len != BLOCK_SIZE:
        raise ValueError(
            "cipherText must be %d bytes long (received %d bytes)"
            % (BLOCK_SIZE, block_len))
    # The C library writes its output into a caller-supplied buffer.
    out_buf = ctypes.create_string_buffer(BLOCK_SIZE)
    self.decblock(self.bitlen, cipherText, self.keytable, out_buf)
    return out_buf.raw
python
def decrypt_block(self, cipherText): """Decrypt a 16-byte block of data. NOTE: This function was formerly called `decrypt`, but was changed when support for decrypting arbitrary-length strings was added. Args: cipherText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes. """ if not self.initialized: raise TypeError("CamCrypt object has not been initialized") if len(cipherText) != BLOCK_SIZE: raise ValueError("cipherText must be %d bytes long (received %d bytes)" % (BLOCK_SIZE, len(cipherText))) plain = ctypes.create_string_buffer(BLOCK_SIZE) self.decblock(self.bitlen, cipherText, self.keytable, plain) return plain.raw
[ "def", "decrypt_block", "(", "self", ",", "cipherText", ")", ":", "if", "not", "self", ".", "initialized", ":", "raise", "TypeError", "(", "\"CamCrypt object has not been initialized\"", ")", "if", "len", "(", "cipherText", ")", "!=", "BLOCK_SIZE", ":", "raise",...
Decrypt a 16-byte block of data. NOTE: This function was formerly called `decrypt`, but was changed when support for decrypting arbitrary-length strings was added. Args: cipherText (str): 16-byte data. Returns: 16-byte str. Raises: TypeError if CamCrypt object has not been initialized. ValueError if `cipherText` is not BLOCK_SIZE (i.e. 16) bytes.
[ "Decrypt", "a", "16", "-", "byte", "block", "of", "data", "." ]
train
https://github.com/knowmalware/camcrypt/blob/40c9ebbbd33ebfbb3a564ee5768cfe7a1815f6a3/camcrypt/__init__.py#L150-L173
SylvanasSun/FishFishJump
fish_dashboard/fault.py
use_backup_if_fail
def use_backup_if_fail(app, key):
    """
    Return a error flag for prompt message in front-end
    if failure times (unceasing fail) greater than max failure times
    else return backup data (latest data in the cache)
    """
    # Locks guard the shared backup/fail_times maps; the context manager
    # form is equivalent to acquire() + try/finally release().
    with lock:
        if key not in backup:
            backup[key] = {}
        hit_limit = (key in fail_times
                     and fail_times[key] % app.config[MAX_FAILURE_TIMES] == 0)
        if hit_limit:
            logger.error(
                '<SERVER KEY %s> At present already reaching the upper limit of the max failure times, failure times: %s' % (
                    key, fail_times[key]))
            message = {app.config[MAX_FAILURE_MESSAGE_KEY]: MAX_FAILURE_MESSAGE % key}
            if alarm_email is not None:
                _send_alarm_email('Happened fault!', MAX_FAILURE_MESSAGE % key)
            return unite_dict(backup[key], message)
        logger.info('<SERVER KEY %s> Request fail or in a status of sleep time window and return backup data %s' % (
            key, backup[key]))
        return backup[key]
python
def use_backup_if_fail(app, key): """ Return a error flag for prompt message in front-end if failure times (unceasing fail) greater than max failure times else return backup data (latest data in the cache) """ lock.acquire() try: if key not in backup: backup[key] = {} if key in fail_times and fail_times[key] % app.config[MAX_FAILURE_TIMES] == 0: logger.error( '<SERVER KEY %s> At present already reaching the upper limit of the max failure times, failure times: %s' % ( key, fail_times[key])) message = {app.config[MAX_FAILURE_MESSAGE_KEY]: MAX_FAILURE_MESSAGE % key} if alarm_email is not None: _send_alarm_email('Happened fault!', MAX_FAILURE_MESSAGE % key) return unite_dict(backup[key], message) else: logger.info('<SERVER KEY %s> Request fail or in a status of sleep time window and return backup data %s' % ( key, backup[key])) return backup[key] finally: lock.release()
[ "def", "use_backup_if_fail", "(", "app", ",", "key", ")", ":", "lock", ".", "acquire", "(", ")", "try", ":", "if", "key", "not", "in", "backup", ":", "backup", "[", "key", "]", "=", "{", "}", "if", "key", "in", "fail_times", "and", "fail_times", "[...
Return a error flag for prompt message in front-end if failure times (unceasing fail) greater than max failure times else return backup data (latest data in the cache)
[ "Return", "a", "error", "flag", "for", "prompt", "message", "in", "front", "-", "end", "if", "failure", "times", "(", "unceasing", "fail", ")", "greater", "than", "max", "failure", "times", "else", "return", "backup", "data", "(", "latest", "data", "in", ...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/fault.py#L64-L86
SylvanasSun/FishFishJump
fish_dashboard/fault.py
is_sleep
def is_sleep(key):
    """
    Determine return data by use cache if this key is in
    the sleep time window(happened error)
    """
    # Context-manager form of lock.acquire()/release().
    with lock:
        if key in sleep_record:
            # Still sleeping while the recorded deadline is in the future.
            return time.time() < sleep_record[key]
        return False
python
def is_sleep(key): """ Determine return data by use cache if this key is in the sleep time window(happened error) """ lock.acquire() try: if key not in sleep_record: return False return time.time() < sleep_record[key] finally: lock.release()
[ "def", "is_sleep", "(", "key", ")", ":", "lock", ".", "acquire", "(", ")", "try", ":", "if", "key", "not", "in", "sleep_record", ":", "return", "False", "return", "time", ".", "time", "(", ")", "<", "sleep_record", "[", "key", "]", "finally", ":", ...
Determine return data by use cache if this key is in the sleep time window(happened error)
[ "Determine", "return", "data", "by", "use", "cache", "if", "this", "key", "is", "in", "the", "sleep", "time", "window", "(", "happened", "error", ")" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/fault.py#L101-L111
darkfeline/animanager
animanager/commands/update.py
command
def command(state, args):
    """Add an anime from an AniDB search.

    With --watching or --incomplete, refreshes every matching anime in
    the database; otherwise refreshes the single aid given (or selected
    from the last results). Sleeps 2s between AniDB requests to be polite.

    Fixes over the previous version: the update logic was duplicated
    (once for `aids.pop()` and again in the loop), and `pop()` made the
    LAST aid be processed first, then the rest in order. Now all aids
    are processed in order through one code path.
    """
    args = parser.parse_args(args[1:])
    if args.watching:
        rows = query.select.select(state.db, 'regexp IS NOT NULL', [], ['aid'])
        aids = [anime.aid for anime in rows]
    elif args.incomplete:
        rows = query.select.select(state.db, 'enddate IS NULL', [], ['aid'])
        aids = [anime.aid for anime in rows]
    else:
        aid = state.results.parse_aid(args.aid, default_key='db')
        aids = [aid]
    for i, aid in enumerate(aids):
        if i:
            # Rate-limit requests to the AniDB API.
            time.sleep(2)
        anime = request_anime(aid)
        query.update.add(state.db, anime)
        print('Updated {} {}'.format(anime.aid, anime.title))
python
def command(state, args): """Add an anime from an AniDB search.""" args = parser.parse_args(args[1:]) if args.watching: rows = query.select.select(state.db, 'regexp IS NOT NULL', [], ['aid']) aids = [anime.aid for anime in rows] elif args.incomplete: rows = query.select.select(state.db, 'enddate IS NULL', [], ['aid']) aids = [anime.aid for anime in rows] else: aid = state.results.parse_aid(args.aid, default_key='db') aids = [aid] if not aids: return anime = request_anime(aids.pop()) query.update.add(state.db, anime) print('Updated {} {}'.format(anime.aid, anime.title)) for aid in aids: time.sleep(2) anime = request_anime(aid) query.update.add(state.db, anime) print('Updated {} {}'.format(anime.aid, anime.title))
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "if", "args", ".", "watching", ":", "rows", "=", "query", ".", "select", ".", "select", "(", "state", ".", "db"...
Add an anime from an AniDB search.
[ "Add", "an", "anime", "from", "an", "AniDB", "search", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/update.py#L25-L46
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.restart
def restart(self):
    """
    Performs a soft reload of the HAProxy process.

    Builds the `haproxy` command line — adding the local peer name on
    HAProxy 1.5+ and `-sf <old pid>` when a pid file exists so the old
    process drains connections before exiting — then runs it.
    """
    version = self.get_version()

    command = [
        "haproxy",
        "-f", self.config_file_path,
        "-p", self.pid_file_path
    ]
    # Peers (-L) are only supported from HAProxy 1.5 onwards.
    if version and version >= (1, 5, 0):
        command.extend(["-L", self.peer.name])
    # -sf tells the new process to signal the old one for a soft stop.
    if os.path.exists(self.pid_file_path):
        with open(self.pid_file_path) as fd:
            command.extend(["-sf", fd.read().replace("\n", "")])

    try:
        output = subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        logger.error("Failed to restart HAProxy: %s", str(e))
        return

    if output:
        # BUG FIX: previously called `logging.error` (root logger) instead
        # of the module-level `logger` used everywhere else in this class.
        logger.error("haproxy says: %s", output)

    logger.info("Gracefully restarted HAProxy.")
python
def restart(self): """ Performs a soft reload of the HAProxy process. """ version = self.get_version() command = [ "haproxy", "-f", self.config_file_path, "-p", self.pid_file_path ] if version and version >= (1, 5, 0): command.extend(["-L", self.peer.name]) if os.path.exists(self.pid_file_path): with open(self.pid_file_path) as fd: command.extend(["-sf", fd.read().replace("\n", "")]) try: output = subprocess.check_output(command) except subprocess.CalledProcessError as e: logger.error("Failed to restart HAProxy: %s", str(e)) return if output: logging.error("haproxy says: %s", output) logger.info("Gracefully restarted HAProxy.")
[ "def", "restart", "(", "self", ")", ":", "version", "=", "self", ".", "get_version", "(", ")", "command", "=", "[", "\"haproxy\"", ",", "\"-f\"", ",", "self", ".", "config_file_path", ",", "\"-p\"", ",", "self", ".", "pid_file_path", "]", "if", "version"...
Performs a soft reload of the HAProxy process.
[ "Performs", "a", "soft", "reload", "of", "the", "HAProxy", "process", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L40-L65
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.get_version
def get_version(self):
    """
    Returns a tuple representing the installed HAProxy version.

    The value of the tuple is (<major>, <minor>, <patch>), e.g. if
    HAProxy version 1.5.3 is installed, this will return `(1, 5, 3)`.

    Returns None if the version cannot be fetched or parsed.
    """
    command = ["haproxy", "-v"]

    try:
        output = subprocess.check_output(command)
        # BUG FIX: subprocess.check_output() returns bytes on Python 3,
        # so splitting with a str separator raised TypeError. Decode
        # first (no-op extra safety on Python 2 via 'replace').
        version_line = output.decode("utf-8", "replace").split("\n")[0]
    except subprocess.CalledProcessError as e:
        logger.error("Could not get HAProxy version: %s", str(e))
        return None

    match = version_re.match(version_line)
    if not match:
        logger.error("Could not parse version from '%s'", version_line)
        return None

    version = (
        int(match.group("major")),
        int(match.group("minor")),
        int(match.group("patch"))
    )
    logger.debug("Got HAProxy version: %s", version)
    return version
python
def get_version(self): """ Returns a tuple representing the installed HAProxy version. The value of the tuple is (<major>, <minor>, <patch>), e.g. if HAProxy version 1.5.3 is installed, this will return `(1, 5, 3)`. """ command = ["haproxy", "-v"] try: output = subprocess.check_output(command) version_line = output.split("\n")[0] except subprocess.CalledProcessError as e: logger.error("Could not get HAProxy version: %s", str(e)) return None match = version_re.match(version_line) if not match: logger.error("Could not parse version from '%s'", version_line) return None version = ( int(match.group("major")), int(match.group("minor")), int(match.group("patch")) ) logger.debug("Got HAProxy version: %s", version) return version
[ "def", "get_version", "(", "self", ")", ":", "command", "=", "[", "\"haproxy\"", ",", "\"-v\"", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "command", ")", "version_line", "=", "output", ".", "split", "(", "\"\\n\"", ")", "[",...
Returns a tuple representing the installed HAProxy version. The value of the tuple is (<major>, <minor>, <patch>), e.g. if HAProxy version 1.5.3 is installed, this will return `(1, 5, 3)`.
[ "Returns", "a", "tuple", "representing", "the", "installed", "HAProxy", "version", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L67-L95
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.get_info
def get_info(self):
    """
    Parses the output of a "show info" HAProxy command and returns
    a simple dictionary of the results, with CamelCase labels
    converted to snake_case keys.
    """
    raw = self.send_command("show info")
    if not raw:
        return {}

    def snake_case(name):
        # Two-pass CamelCase -> snake_case using the module-level regexes.
        partial = first_cap_re.sub(r'\1_\2', name)
        return all_cap_re.sub(r'\1_\2', partial).lower()

    info = {}
    for line in raw.split("\n"):
        # Each line is "<Label>: <value>".
        label, value = line.split(": ")
        info[snake_case(label)] = value
    return info
python
def get_info(self): """ Parses the output of a "show info" HAProxy command and returns a simple dictionary of the results. """ info_response = self.send_command("show info") if not info_response: return {} def convert_camel_case(string): return all_cap_re.sub( r'\1_\2', first_cap_re.sub(r'\1_\2', string) ).lower() return dict( (convert_camel_case(label), value) for label, value in [ line.split(": ") for line in info_response.split("\n") ] )
[ "def", "get_info", "(", "self", ")", ":", "info_response", "=", "self", ".", "send_command", "(", "\"show info\"", ")", "if", "not", "info_response", ":", "return", "{", "}", "def", "convert_camel_case", "(", "string", ")", ":", "return", "all_cap_re", ".", ...
Parses the output of a "show info" HAProxy command and returns a simple dictionary of the results.
[ "Parses", "the", "output", "of", "a", "show", "info", "HAProxy", "command", "and", "returns", "a", "simple", "dictionary", "of", "the", "results", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L97-L119
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.get_active_nodes
def get_active_nodes(self):
    """
    Returns a dictionary of lists, where the key is the name of a
    service and the list includes all active nodes associated with
    that service. Each node is a dict of stat field -> value.

    Fixes over the previous version: an empty stats response returned a
    list ([]) although the documented return type is a dict, and a
    trailing blank line in the response caused an IndexError.
    """
    # the -1 4 -1 args are the filters <proxy_id> <type> <server_id>,
    # -1 for all proxies, 4 for servers only, -1 for all servers
    stats_response = self.send_command("show stat -1 4 -1")
    if not stats_response:
        return {}

    lines = stats_response.split("\n")
    fields = lines.pop(0).split(",")

    # the first field is the service name, which we key off of so
    # it's not included in individual node records
    fields.pop(0)

    active_nodes = collections.defaultdict(list)
    for line in lines:
        if not line:
            # Skip blank/trailing lines rather than crashing on them.
            continue
        values = line.split(",")
        service_name = values.pop(0)
        active_nodes[service_name].append(dict(zip(fields, values)))

    return active_nodes
python
def get_active_nodes(self): """ Returns a dictionary of lists, where the key is the name of a service and the list includes all active nodes associated with that service. """ # the -1 4 -1 args are the filters <proxy_id> <type> <server_id>, # -1 for all proxies, 4 for servers only, -1 for all servers stats_response = self.send_command("show stat -1 4 -1") if not stats_response: return [] lines = stats_response.split("\n") fields = lines.pop(0).split(",") # the first field is the service name, which we key off of so # it's not included in individual node records fields.pop(0) active_nodes = collections.defaultdict(list) for line in lines: values = line.split(",") service_name = values.pop(0) active_nodes[service_name].append( dict( (fields[i], values[i]) for i in range(len(fields)) ) ) return active_nodes
[ "def", "get_active_nodes", "(", "self", ")", ":", "# the -1 4 -1 args are the filters <proxy_id> <type> <server_id>,", "# -1 for all proxies, 4 for servers only, -1 for all servers", "stats_response", "=", "self", ".", "send_command", "(", "\"show stat -1 4 -1\"", ")", "if", "not",...
Returns a dictionary of lists, where the key is the name of a service and the list includes all active nodes associated with that service.
[ "Returns", "a", "dictionary", "of", "lists", "where", "the", "key", "is", "the", "name", "of", "a", "service", "and", "the", "list", "includes", "all", "active", "nodes", "associated", "with", "that", "service", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L121-L150
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.enable_node
def enable_node(self, service_name, node_name):
    """
    Puts `node_name` back into rotation for `service_name` by issuing
    the "enable server" command on the HAProxy control socket.

    Returns the raw socket response.
    """
    logger.info("Enabling server %s/%s", service_name, node_name)
    socket_command = "enable server %s/%s" % (service_name, node_name)
    return self.send_command(socket_command)
python
def enable_node(self, service_name, node_name): """ Enables a given node name for the given service name via the "enable server" HAProxy command. """ logger.info("Enabling server %s/%s", service_name, node_name) return self.send_command( "enable server %s/%s" % (service_name, node_name) )
[ "def", "enable_node", "(", "self", ",", "service_name", ",", "node_name", ")", ":", "logger", ".", "info", "(", "\"Enabling server %s/%s\"", ",", "service_name", ",", "node_name", ")", "return", "self", ".", "send_command", "(", "\"enable server %s/%s\"", "%", "...
Enables a given node name for the given service name via the "enable server" HAProxy command.
[ "Enables", "a", "given", "node", "name", "for", "the", "given", "service", "name", "via", "the", "enable", "server", "HAProxy", "command", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L152-L160
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.disable_node
def disable_node(self, service_name, node_name):
    """
    Takes `node_name` out of rotation for `service_name` by issuing
    the "disable server" command on the HAProxy control socket.

    Returns the raw socket response.
    """
    logger.info("Disabling server %s/%s", service_name, node_name)
    socket_command = "disable server %s/%s" % (service_name, node_name)
    return self.send_command(socket_command)
python
def disable_node(self, service_name, node_name): """ Disables a given node name for the given service name via the "disable server" HAProxy command. """ logger.info("Disabling server %s/%s", service_name, node_name) return self.send_command( "disable server %s/%s" % (service_name, node_name) )
[ "def", "disable_node", "(", "self", ",", "service_name", ",", "node_name", ")", ":", "logger", ".", "info", "(", "\"Disabling server %s/%s\"", ",", "service_name", ",", "node_name", ")", "return", "self", ".", "send_command", "(", "\"disable server %s/%s\"", "%", ...
Disables a given node name for the given service name via the "disable server" HAProxy command.
[ "Disables", "a", "given", "node", "name", "for", "the", "given", "service", "name", "via", "the", "disable", "server", "HAProxy", "command", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L162-L170
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.send_command
def send_command(self, command):
    """
    Sends a given command to the HAProxy control socket.

    Returns the response from the socket as a string, or None if the
    socket connection was refused.

    If a known error response (e.g. "Permission denied.") is given
    then the appropriate exception is raised (see
    process_command_response).
    """
    logger.debug("Connecting to socket %s", self.socket_file_path)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(self.socket_file_path)
    except IOError as e:
        # A refused connection usually means HAProxy isn't running yet;
        # report it and return None rather than blowing up.
        if e.errno == errno.ECONNREFUSED:
            logger.error("Connection refused. Is HAProxy running?")
            return
        else:
            raise
    # HAProxy socket commands are newline-terminated; encode for py3 sockets.
    sock.sendall((command + "\n").encode())
    response = b""
    while True:
        try:
            chunk = sock.recv(SOCKET_BUFFER_SIZE)
            if chunk:
                response += chunk
            else:
                # Empty read: HAProxy closed its end, response complete.
                break
        except IOError as e:
            # EAGAIN/EINTR are transient — retry the read; anything else
            # is a real socket failure and is re-raised.
            if e.errno not in (errno.EAGAIN, errno.EINTR):
                raise
    sock.close()
    return self.process_command_response(command, response)
python
def send_command(self, command): """ Sends a given command to the HAProxy control socket. Returns the response from the socket as a string. If a known error response (e.g. "Permission denied.") is given then the appropriate exception is raised. """ logger.debug("Connecting to socket %s", self.socket_file_path) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: sock.connect(self.socket_file_path) except IOError as e: if e.errno == errno.ECONNREFUSED: logger.error("Connection refused. Is HAProxy running?") return else: raise sock.sendall((command + "\n").encode()) response = b"" while True: try: chunk = sock.recv(SOCKET_BUFFER_SIZE) if chunk: response += chunk else: break except IOError as e: if e.errno not in (errno.EAGAIN, errno.EINTR): raise sock.close() return self.process_command_response(command, response)
[ "def", "send_command", "(", "self", ",", "command", ")", ":", "logger", ".", "debug", "(", "\"Connecting to socket %s\"", ",", "self", ".", "socket_file_path", ")", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "S...
Sends a given command to the HAProxy control socket. Returns the response from the socket as a string. If a known error response (e.g. "Permission denied.") is given then the appropriate exception is raised.
[ "Sends", "a", "given", "command", "to", "the", "HAProxy", "control", "socket", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L172-L208
wglass/lighthouse
lighthouse/haproxy/control.py
HAProxyControl.process_command_response
def process_command_response(self, command, response):
    """
    Inspects a raw HAProxy socket reply for the given command: raises
    the matching exception for known error replies, otherwise decodes
    the reply and returns it with trailing newlines stripped.
    """
    # Known fatal replies from the control socket. These are mutually
    # exclusive, so the check order doesn't matter.
    if response == b"Permission denied.\n":
        raise PermissionError(command)
    if response == b"No such backend.\n":
        raise UnknownServerError(command)
    if response.startswith(b"Unknown command."):
        raise UnknownCommandError(command)

    text = response.decode()
    return text.rstrip("\n")
python
def process_command_response(self, command, response): """ Takes an HAProxy socket command and its response and either raises an appropriate exception or returns the formatted response. """ if response.startswith(b"Unknown command."): raise UnknownCommandError(command) if response == b"Permission denied.\n": raise PermissionError(command) if response == b"No such backend.\n": raise UnknownServerError(command) response = response.decode() return response.rstrip("\n")
[ "def", "process_command_response", "(", "self", ",", "command", ",", "response", ")", ":", "if", "response", ".", "startswith", "(", "b\"Unknown command.\"", ")", ":", "raise", "UnknownCommandError", "(", "command", ")", "if", "response", "==", "b\"Permission deni...
Takes an HAProxy socket command and its response and either raises an appropriate exception or returns the formatted response.
[ "Takes", "an", "HAProxy", "socket", "command", "and", "its", "response", "and", "either", "raises", "an", "appropriate", "exception", "or", "returns", "the", "formatted", "response", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L210-L223
frnsys/broca
broca/distance/levenshtein.py
levenshtein
def levenshtein(source, target):
    """Return the Levenshtein edit distance between two sequences.

    Cribbed from <https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python>
    """
    # Arrange for `source` to be the longer sequence (distance is symmetric).
    if len(source) < len(target):
        source, target = target, source
    if not target:
        # Everything in `source` must be deleted.
        return len(source)

    # tuple() forces strings to be treated as character sequences —
    # numpy would otherwise store them as scalar values.
    src = np.array(tuple(source))
    tgt = np.array(tuple(target))

    # Dynamic programming over the edit matrix, keeping only the last
    # two rows at any time.
    prev = np.arange(tgt.size + 1)
    for ch in src:
        # Insertion: target grows longer than source.
        cur = prev + 1
        # Substitution or match: aligned items cost 1 if different, 0 if equal.
        cur[1:] = np.minimum(cur[1:], prev[:-1] + (tgt != ch))
        # Deletion: target grows shorter than source.
        cur[1:] = np.minimum(cur[1:], cur[:-1] + 1)
        prev = cur
    return prev[-1]
python
def levenshtein(source, target): """ Cribbed from <https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python> """ if len(source) < len(target): return levenshtein(target, source) # So now we have len(source) >= len(target). if len(target) == 0: return len(source) # We call tuple() to force strings to be used as sequences # ('c', 'a', 't', 's') - numpy uses them as values by default. source = np.array(tuple(source)) target = np.array(tuple(target)) # We use a dynamic programming algorithm, but with the # added optimization that we only need the last two rows # of the matrix. previous_row = np.arange(target.size + 1) for s in source: # Insertion (target grows longer than source): current_row = previous_row + 1 # Substitution or matching: # Target and source items are aligned, and either # are different (cost of 1), or are the same (cost of 0). current_row[1:] = np.minimum( current_row[1:], np.add(previous_row[:-1], target != s)) # Deletion (target grows shorter than source): current_row[1:] = np.minimum( current_row[1:], current_row[0:-1] + 1) previous_row = current_row return previous_row[-1]
[ "def", "levenshtein", "(", "source", ",", "target", ")", ":", "if", "len", "(", "source", ")", "<", "len", "(", "target", ")", ":", "return", "levenshtein", "(", "target", ",", "source", ")", "# So now we have len(source) >= len(target).", "if", "len", "(", ...
Cribbed from <https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python>
[ "Cribbed", "from", "<https", ":", "//", "en", ".", "wikibooks", ".", "org", "/", "wiki", "/", "Algorithm_Implementation", "/", "Strings", "/", "Levenshtein_distance#Python", ">" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/distance/levenshtein.py#L4-L42
coordt/django-alphabetfilter
alphafilter/views.py
alphafilter
def alphafilter(request, queryset, template):
    """
    Render the template with the filtered queryset.

    Only the first `__istartswith` query-string parameter found is
    applied; the unfiltered queryset is also passed to the template.
    """
    filter_kwargs = {}
    for param in list(request.GET.keys()):
        if '__istartswith' in param:
            filter_kwargs[str(param)] = request.GET[param]
            break  # honor only the first matching parameter

    context = {
        'objects': queryset.filter(**filter_kwargs),
        'unfiltered_objects': queryset,
    }
    return render_to_response(
        template, context, context_instance=RequestContext(request))
python
def alphafilter(request, queryset, template): """ Render the template with the filtered queryset """ qs_filter = {} for key in list(request.GET.keys()): if '__istartswith' in key: qs_filter[str(key)] = request.GET[key] break return render_to_response( template, {'objects': queryset.filter(**qs_filter), 'unfiltered_objects': queryset}, context_instance=RequestContext(request) )
[ "def", "alphafilter", "(", "request", ",", "queryset", ",", "template", ")", ":", "qs_filter", "=", "{", "}", "for", "key", "in", "list", "(", "request", ".", "GET", ".", "keys", "(", ")", ")", ":", "if", "'__istartswith'", "in", "key", ":", "qs_filt...
Render the template with the filtered queryset
[ "Render", "the", "template", "with", "the", "filtered", "queryset" ]
train
https://github.com/coordt/django-alphabetfilter/blob/a7bc21c0ea985c2021a4668241bf643c615c6c1f/alphafilter/views.py#L13-L29
ucbvislab/radiotool
radiotool/composer/rawvolume.py
RawVolume.to_array
def to_array(self, channels=2):
    """Return the array of multipliers for the dynamic.

    Args:
        channels (int): number of output channels, 1 or 2.

    Returns:
        numpy array of shape (n_frames, channels).

    Raises:
        ValueError: if `channels` is not 1 or 2.
    """
    if channels == 1:
        return self.volume_frames.reshape(-1, 1)
    if channels == 2:
        return np.tile(self.volume_frames, (2, 1)).T
    # ValueError is more precise than the bare Exception previously
    # raised, and stays backward compatible (ValueError is an Exception).
    raise ValueError(
        "RawVolume doesn't know what to do with %s channels" % channels)
python
def to_array(self, channels=2): """Return the array of multipliers for the dynamic""" if channels == 1: return self.volume_frames.reshape(-1, 1) if channels == 2: return np.tile(self.volume_frames, (2, 1)).T raise Exception( "RawVolume doesn't know what to do with %s channels" % channels)
[ "def", "to_array", "(", "self", ",", "channels", "=", "2", ")", ":", "if", "channels", "==", "1", ":", "return", "self", ".", "volume_frames", ".", "reshape", "(", "-", "1", ",", "1", ")", "if", "channels", "==", "2", ":", "return", "np", ".", "t...
Return the array of multipliers for the dynamic
[ "Return", "the", "array", "of", "multipliers", "for", "the", "dynamic" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/rawvolume.py#L25-L32
SylvanasSun/FishFishJump
fish_crawlers/slave_crawler/slave_crawler/spiders/crawler.py
SimpleCrawler.generate_simhash
def generate_simhash(self, item):
    """
    Generate simhash based on title, description, keywords, p_texts and links_text.

    Args:
        item: mapping with 'p_texts', 'links_text' (lists of str) and
              'title', 'description', 'keywords' (str) entries.

    Returns:
        The integer hash of the combined text.
    """
    # `texts` replaces a local previously named `list`, which shadowed
    # the builtin. Concatenation creates a fresh list, so the appends
    # below never mutate the item's own lists.
    texts = item['p_texts'] + item['links_text']
    texts.append(item['title'])
    texts.append(item['description'])
    texts.append(item['keywords'])
    return Simhash(','.join(texts).strip()).hash
python
def generate_simhash(self, item): """ Generate simhash based on title, description, keywords, p_texts and links_text. """ list = item['p_texts'] + item['links_text'] list.append(item['title']) list.append(item['description']) list.append(item['keywords']) return Simhash(','.join(list).strip()).hash
[ "def", "generate_simhash", "(", "self", ",", "item", ")", ":", "list", "=", "item", "[", "'p_texts'", "]", "+", "item", "[", "'links_text'", "]", "list", ".", "append", "(", "item", "[", "'title'", "]", ")", "list", ".", "append", "(", "item", "[", ...
Generate simhash based on title, description, keywords, p_texts and links_text.
[ "Generate", "simhash", "based", "on", "title", "description", "keywords", "p_texts", "and", "links_text", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_crawlers/slave_crawler/slave_crawler/spiders/crawler.py#L46-L54
frnsys/broca
broca/knowledge/phrases.py
train_phrases
def train_phrases(paths, out='data/bigram_model.phrases', tokenizer=word_tokenize, **kwargs): """ Train a bigram phrase model on a list of files. """ n = 0 for path in paths: print('Counting lines for {0}...'.format(path)) n += sum(1 for line in open(path, 'r')) print('Processing {0} lines...'.format(n)) # Change to use less memory. Default is 40m. kwargs = { 'max_vocab_size': 40000000, 'threshold': 8. }.update(kwargs) print('Training bigrams...') bigram = Phrases(_phrase_doc_stream(paths, n, tokenizer=word_tokenize), **kwargs) print('Saving...') bigram.save(out)
python
def train_phrases(paths, out='data/bigram_model.phrases', tokenizer=word_tokenize, **kwargs): """ Train a bigram phrase model on a list of files. """ n = 0 for path in paths: print('Counting lines for {0}...'.format(path)) n += sum(1 for line in open(path, 'r')) print('Processing {0} lines...'.format(n)) # Change to use less memory. Default is 40m. kwargs = { 'max_vocab_size': 40000000, 'threshold': 8. }.update(kwargs) print('Training bigrams...') bigram = Phrases(_phrase_doc_stream(paths, n, tokenizer=word_tokenize), **kwargs) print('Saving...') bigram.save(out)
[ "def", "train_phrases", "(", "paths", ",", "out", "=", "'data/bigram_model.phrases'", ",", "tokenizer", "=", "word_tokenize", ",", "*", "*", "kwargs", ")", ":", "n", "=", "0", "for", "path", "in", "paths", ":", "print", "(", "'Counting lines for {0}...'", "....
Train a bigram phrase model on a list of files.
[ "Train", "a", "bigram", "phrase", "model", "on", "a", "list", "of", "files", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/phrases.py#L6-L26
frnsys/broca
broca/knowledge/phrases.py
_phrase_doc_stream
def _phrase_doc_stream(paths, n, tokenizer=word_tokenize): """ Generator to feed sentences to the phrase model. """ i = 0 p = Progress() for path in paths: with open(path, 'r') as f: for line in f: i += 1 p.print_progress(i/n) for sent in sent_tokenize(line.lower()): tokens = tokenizer(sent) yield tokens
python
def _phrase_doc_stream(paths, n, tokenizer=word_tokenize): """ Generator to feed sentences to the phrase model. """ i = 0 p = Progress() for path in paths: with open(path, 'r') as f: for line in f: i += 1 p.print_progress(i/n) for sent in sent_tokenize(line.lower()): tokens = tokenizer(sent) yield tokens
[ "def", "_phrase_doc_stream", "(", "paths", ",", "n", ",", "tokenizer", "=", "word_tokenize", ")", ":", "i", "=", "0", "p", "=", "Progress", "(", ")", "for", "path", "in", "paths", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", ...
Generator to feed sentences to the phrase model.
[ "Generator", "to", "feed", "sentences", "to", "the", "phrase", "model", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/phrases.py#L29-L42
SylvanasSun/FishFishJump
fish_core/simhash.py
_default_hashfunc
def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x
python
def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x
[ "def", "_default_hashfunc", "(", "content", ",", "hashbits", ")", ":", "if", "content", "==", "\"\"", ":", "return", "0", "x", "=", "ord", "(", "content", "[", "0", "]", ")", "<<", "7", "m", "=", "1000003", "mask", "=", "2", "**", "hashbits", "-", ...
Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number.
[ "Default", "hash", "function", "is", "variable", "-", "length", "version", "of", "Python", "s", "builtin", "hash", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L11-L29
SylvanasSun/FishFishJump
fish_core/simhash.py
_default_tokenizer_func
def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True)
python
def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True)
[ "def", "_default_tokenizer_func", "(", "content", ",", "keyword_weight_pair", ")", ":", "seg_list", "=", "jieba", ".", "lcut_for_search", "(", "content", ")", "# Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight", "return", "jieba", ".", "analyse",...
Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].
[ "Default", "tokenizer", "function", "that", "uses", "jieba", "tokenizer", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L33-L42
SylvanasSun/FishFishJump
fish_core/simhash.py
Simhash.simhash
def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content))
python
def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content))
[ "def", "simhash", "(", "self", ",", "content", ")", ":", "if", "content", "is", "None", ":", "self", ".", "hash", "=", "-", "1", "return", "if", "isinstance", "(", "content", ",", "str", ")", ":", "features", "=", "self", ".", "tokenizer_func", "(", ...
Select policies for simhash on the different types of content.
[ "Select", "policies", "for", "simhash", "on", "the", "different", "types", "of", "content", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L103-L119
SylvanasSun/FishFishJump
fish_core/simhash.py
Simhash.build_from_features
def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. """ v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint
python
def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. """ v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint
[ "def", "build_from_features", "(", "self", ",", "features", ")", ":", "v", "=", "[", "0", "]", "*", "self", ".", "hash_bit_number", "if", "isinstance", "(", "features", ",", "dict", ")", ":", "features", "=", "features", ".", "items", "(", ")", "# Star...
:param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair.
[ ":", "param", "features", ":", "a", "list", "of", "(", "token", "weight", ")", "tuples", "or", "a", "token", "-", ">", "weight", "dict", "if", "is", "a", "string", "so", "it", "need", "compute", "weight", "(", "a", "weight", "of", "1", "will", "be"...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L121-L152
SylvanasSun/FishFishJump
fish_core/simhash.py
Simhash.is_equal
def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False
python
def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False
[ "def", "is_equal", "(", "self", ",", "another", ",", "limit", "=", "0.8", ")", ":", "if", "another", "is", "None", ":", "raise", "Exception", "(", "\"Parameter another is null\"", ")", "if", "isinstance", "(", "another", ",", "int", ")", ":", "distance", ...
Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false.
[ "Determine", "two", "simhash", "are", "similar", "or", "not", "similar", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L154-L176
SylvanasSun/FishFishJump
fish_core/simhash.py
Simhash.hamming_distance
def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result
python
def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result
[ "def", "hamming_distance", "(", "self", ",", "another", ")", ":", "x", "=", "(", "self", ".", "hash", "^", "another", ")", "&", "(", "(", "1", "<<", "self", ".", "hash_bit_number", ")", "-", "1", ")", "result", "=", "0", "while", "x", ":", "resul...
Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash.
[ "Compute", "hamming", "distance", "hamming", "distance", "is", "a", "total", "number", "of", "different", "bits", "of", "two", "binary", "numbers", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/simhash.py#L178-L190
durden/nikeplus
nikeplusapi/export.py
_validate_date_str
def _validate_date_str(str_): """Validate str as a date and return string version of date""" if not str_: return None # Convert to datetime so we can validate it's a real date that exists then # convert it back to the string. try: date = datetime.strptime(str_, DATE_FMT) except ValueError: msg = 'Invalid date format, should be YYYY-MM-DD' raise argparse.ArgumentTypeError(msg) return date.strftime(DATE_FMT)
python
def _validate_date_str(str_): """Validate str as a date and return string version of date""" if not str_: return None # Convert to datetime so we can validate it's a real date that exists then # convert it back to the string. try: date = datetime.strptime(str_, DATE_FMT) except ValueError: msg = 'Invalid date format, should be YYYY-MM-DD' raise argparse.ArgumentTypeError(msg) return date.strftime(DATE_FMT)
[ "def", "_validate_date_str", "(", "str_", ")", ":", "if", "not", "str_", ":", "return", "None", "# Convert to datetime so we can validate it's a real date that exists then", "# convert it back to the string.", "try", ":", "date", "=", "datetime", ".", "strptime", "(", "st...
Validate str as a date and return string version of date
[ "Validate", "str", "as", "a", "date", "and", "return", "string", "version", "of", "date" ]
train
https://github.com/durden/nikeplus/blob/7a8b65774d33538d8867c09667f0b70f9c3bf347/nikeplusapi/export.py#L45-L59
durden/nikeplus
nikeplusapi/export.py
_parse_args
def _parse_args(): """Parse sys.argv arguments""" token_file = os.path.expanduser('~/.nikeplus_access_token') parser = argparse.ArgumentParser(description='Export NikePlus data to CSV') parser.add_argument('-t', '--token', required=False, default=None, help=('Access token for API, can also store in file %s' ' to avoid passing via command line' % (token_file))) parser.add_argument('-s', '--since', type=_validate_date_str, help=('Only process entries starting with YYYY-MM-DD ' 'and newer')) args = vars(parser.parse_args()) if args['token'] is None: try: with open(token_file, 'r') as _file: access_token = _file.read().strip() except IOError: print 'Must pass access token via command line or store in file %s' % ( token_file) sys.exit(-1) args['token'] = access_token return args
python
def _parse_args(): """Parse sys.argv arguments""" token_file = os.path.expanduser('~/.nikeplus_access_token') parser = argparse.ArgumentParser(description='Export NikePlus data to CSV') parser.add_argument('-t', '--token', required=False, default=None, help=('Access token for API, can also store in file %s' ' to avoid passing via command line' % (token_file))) parser.add_argument('-s', '--since', type=_validate_date_str, help=('Only process entries starting with YYYY-MM-DD ' 'and newer')) args = vars(parser.parse_args()) if args['token'] is None: try: with open(token_file, 'r') as _file: access_token = _file.read().strip() except IOError: print 'Must pass access token via command line or store in file %s' % ( token_file) sys.exit(-1) args['token'] = access_token return args
[ "def", "_parse_args", "(", ")", ":", "token_file", "=", "os", ".", "path", ".", "expanduser", "(", "'~/.nikeplus_access_token'", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Export NikePlus data to CSV'", ")", "parser", ".", ...
Parse sys.argv arguments
[ "Parse", "sys", ".", "argv", "arguments" ]
train
https://github.com/durden/nikeplus/blob/7a8b65774d33538d8867c09667f0b70f9c3bf347/nikeplusapi/export.py#L62-L89
frnsys/broca
broca/similarity/doc/entkey.py
EntKeySimilarity.similarity
def similarity(self, d, d_): """ Compute a similarity score for two documents. Optionally pass in a `term_sim_ref` dict-like, which should be able to take `term1, term2` as args and return their similarity. """ es = set([e.name for e in d.entities]) es_ = set([e.name for e in d_.entities]) e_weight = (len(es) + len(es_) - abs(len(es) - len(es_)))/2 e_score = sum(self.idf_entity[t] for t in es & es_) toks = set(d.tokens) toks_ = set(d_.tokens) t_weight = (len(toks) + len(toks_) - abs(len(toks) - len(toks_)))/2 # If no term similarity reference is passed, # look only at surface form overlap (i.e. exact overlap) shared_toks = toks & toks_ overlap = [(t, t, self.idf[t]) for t in shared_toks] t_score = sum(self.idf[t] for t in shared_toks) if self.term_sim_ref is not None: # Double-count exact overlaps b/c we are # comparing bidirectional term pairs here t_score *= 2 for toks1, toks2 in [(toks, toks_), (toks_, toks)]: for t in toks1 - shared_toks: best_match = max(toks2, key=lambda t_: self.term_sim_ref[t, t_]) sim = self.term_sim_ref[t, best_match] t_score += sim * ((self.idf[t] + self.idf[best_match])/2) if sim > 0: overlap.append((t, best_match, sim * ((self.idf[t] + self.idf[best_match])/2))) # Adjust term weight #t_weight /= 2 t_weight = 1/t_weight if t_weight != 0 else 0 e_weight = 1/e_weight if e_weight != 0 else 0 t_score *= t_weight e_score *= e_weight if self.debug: print('\n-------------------------') print((d.id, d_.id)) print('DOC:', d.id) print('DOC:', d_.id) print('\tEntities:') print('\t', es) print('\t', es_) print('\t\tEntity overlap:', es & es_) print('\t\tEntity weight:', e_weight) print('\t\tEntity score:', e_score) print('\tTokens:') print('\t\t', toks) print('\t\t', toks_) print('\t\tToken overlap:', overlap) print('\t\tToken weight:', t_weight) print('\t\tToken score:', t_score) print('\tTotal score:', t_score + e_score) return t_score + e_score
python
def similarity(self, d, d_): """ Compute a similarity score for two documents. Optionally pass in a `term_sim_ref` dict-like, which should be able to take `term1, term2` as args and return their similarity. """ es = set([e.name for e in d.entities]) es_ = set([e.name for e in d_.entities]) e_weight = (len(es) + len(es_) - abs(len(es) - len(es_)))/2 e_score = sum(self.idf_entity[t] for t in es & es_) toks = set(d.tokens) toks_ = set(d_.tokens) t_weight = (len(toks) + len(toks_) - abs(len(toks) - len(toks_)))/2 # If no term similarity reference is passed, # look only at surface form overlap (i.e. exact overlap) shared_toks = toks & toks_ overlap = [(t, t, self.idf[t]) for t in shared_toks] t_score = sum(self.idf[t] for t in shared_toks) if self.term_sim_ref is not None: # Double-count exact overlaps b/c we are # comparing bidirectional term pairs here t_score *= 2 for toks1, toks2 in [(toks, toks_), (toks_, toks)]: for t in toks1 - shared_toks: best_match = max(toks2, key=lambda t_: self.term_sim_ref[t, t_]) sim = self.term_sim_ref[t, best_match] t_score += sim * ((self.idf[t] + self.idf[best_match])/2) if sim > 0: overlap.append((t, best_match, sim * ((self.idf[t] + self.idf[best_match])/2))) # Adjust term weight #t_weight /= 2 t_weight = 1/t_weight if t_weight != 0 else 0 e_weight = 1/e_weight if e_weight != 0 else 0 t_score *= t_weight e_score *= e_weight if self.debug: print('\n-------------------------') print((d.id, d_.id)) print('DOC:', d.id) print('DOC:', d_.id) print('\tEntities:') print('\t', es) print('\t', es_) print('\t\tEntity overlap:', es & es_) print('\t\tEntity weight:', e_weight) print('\t\tEntity score:', e_score) print('\tTokens:') print('\t\t', toks) print('\t\t', toks_) print('\t\tToken overlap:', overlap) print('\t\tToken weight:', t_weight) print('\t\tToken score:', t_score) print('\tTotal score:', t_score + e_score) return t_score + e_score
[ "def", "similarity", "(", "self", ",", "d", ",", "d_", ")", ":", "es", "=", "set", "(", "[", "e", ".", "name", "for", "e", "in", "d", ".", "entities", "]", ")", "es_", "=", "set", "(", "[", "e", ".", "name", "for", "e", "in", "d_", ".", "...
Compute a similarity score for two documents. Optionally pass in a `term_sim_ref` dict-like, which should be able to take `term1, term2` as args and return their similarity.
[ "Compute", "a", "similarity", "score", "for", "two", "documents", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/similarity/doc/entkey.py#L30-L92
peterldowns/python-mustache
mustache/rendering.py
get_match_info
def get_match_info(template, match, state): """ Given a template and a regex match within said template, return a dictionary of information about the match to be used to help parse the template. """ info = match.groupdict() # Put special delimiter cases in terms of normal ones if info['change']: info.update({ 'tag_type' : '=', 'tag_key' : info['delims'], }) elif info['raw']: info.update({ 'tag_type' : '&', 'tag_key' : info['raw_key'], }) # Rename the important match variables for convenience tag_start = match.start() tag_end = match.end() tag_type = info['tag_type'] tag_key = info['tag_key'] lead_wsp = info['lead_wsp'] end_wsp = info['end_wsp'] begins_line = (tag_start == 0) or (template[tag_start-1] in state.eol_chars) ends_line = (tag_end == len(template) or template[tag_end] in state.eol_chars) interpolating = (tag_type in ('', '&')) standalone = (not interpolating) and begins_line and ends_line if end_wsp: tag_end -= len(end_wsp) if standalone: template_length = len(template) # Standalone tags strip exactly one occurence of '\r', '\n', or '\r\n' # from the end of the line. if tag_end < len(template) and template[tag_end] == '\r': tag_end += 1 if tag_end < len(template) and template[tag_end] == '\n': tag_end += 1 elif lead_wsp: tag_start += len(lead_wsp) lead_wsp = '' info.update({ 'tag_start' : tag_start, 'tag_end' : tag_end, 'tag_type' : tag_type, 'tag_key' : tag_key, 'lead_wsp' : lead_wsp, 'end_wsp' : end_wsp, 'begins_line' : begins_line, 'ends_line' : ends_line, 'interpolating' : interpolating, 'standalone' : standalone, }) return info
python
def get_match_info(template, match, state): """ Given a template and a regex match within said template, return a dictionary of information about the match to be used to help parse the template. """ info = match.groupdict() # Put special delimiter cases in terms of normal ones if info['change']: info.update({ 'tag_type' : '=', 'tag_key' : info['delims'], }) elif info['raw']: info.update({ 'tag_type' : '&', 'tag_key' : info['raw_key'], }) # Rename the important match variables for convenience tag_start = match.start() tag_end = match.end() tag_type = info['tag_type'] tag_key = info['tag_key'] lead_wsp = info['lead_wsp'] end_wsp = info['end_wsp'] begins_line = (tag_start == 0) or (template[tag_start-1] in state.eol_chars) ends_line = (tag_end == len(template) or template[tag_end] in state.eol_chars) interpolating = (tag_type in ('', '&')) standalone = (not interpolating) and begins_line and ends_line if end_wsp: tag_end -= len(end_wsp) if standalone: template_length = len(template) # Standalone tags strip exactly one occurence of '\r', '\n', or '\r\n' # from the end of the line. if tag_end < len(template) and template[tag_end] == '\r': tag_end += 1 if tag_end < len(template) and template[tag_end] == '\n': tag_end += 1 elif lead_wsp: tag_start += len(lead_wsp) lead_wsp = '' info.update({ 'tag_start' : tag_start, 'tag_end' : tag_end, 'tag_type' : tag_type, 'tag_key' : tag_key, 'lead_wsp' : lead_wsp, 'end_wsp' : end_wsp, 'begins_line' : begins_line, 'ends_line' : ends_line, 'interpolating' : interpolating, 'standalone' : standalone, }) return info
[ "def", "get_match_info", "(", "template", ",", "match", ",", "state", ")", ":", "info", "=", "match", ".", "groupdict", "(", ")", "# Put special delimiter cases in terms of normal ones", "if", "info", "[", "'change'", "]", ":", "info", ".", "update", "(", "{",...
Given a template and a regex match within said template, return a dictionary of information about the match to be used to help parse the template.
[ "Given", "a", "template", "and", "a", "regex", "match", "within", "said", "template", "return", "a", "dictionary", "of", "information", "about", "the", "match", "to", "be", "used", "to", "help", "parse", "the", "template", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L10-L70
peterldowns/python-mustache
mustache/rendering.py
get_tag_context
def get_tag_context(name, state): """ Given a tag name, return its associated value as defined in the current context stack. """ new_contexts = 0 ctm = None while True: try: ctx_key, name = name.split('.', 1) ctm = state.context.get(ctx_key) except ValueError: break if not ctm: break else: state.context.push(ctm) new_contexts += 1 ctm = state.context.get(name) return new_contexts, ctm
python
def get_tag_context(name, state): """ Given a tag name, return its associated value as defined in the current context stack. """ new_contexts = 0 ctm = None while True: try: ctx_key, name = name.split('.', 1) ctm = state.context.get(ctx_key) except ValueError: break if not ctm: break else: state.context.push(ctm) new_contexts += 1 ctm = state.context.get(name) return new_contexts, ctm
[ "def", "get_tag_context", "(", "name", ",", "state", ")", ":", "new_contexts", "=", "0", "ctm", "=", "None", "while", "True", ":", "try", ":", "ctx_key", ",", "name", "=", "name", ".", "split", "(", "'.'", ",", "1", ")", "ctm", "=", "state", ".", ...
Given a tag name, return its associated value as defined in the current context stack.
[ "Given", "a", "tag", "name", "return", "its", "associated", "value", "as", "defined", "in", "the", "current", "context", "stack", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L72-L93
peterldowns/python-mustache
mustache/rendering.py
section_end_info
def section_end_info(template, tag_key, state, index): """ Given the tag key of an opening section tag, find the corresponding closing tag (if it exists) and return information about that match. """ state.section.push(tag_key) match = None matchinfo = None search_index = index while state.section: match = state.tag_re.search(template, search_index) if not match: raise Exception("Open section %s never closed" % tag_key) matchinfo = get_match_info(template, match, state) # If we find a new section tag, add it to the stack and keep going if matchinfo['tag_type'] in ('#', '^'): state.section.push(matchinfo['tag_key']) # If we find a closing tag for the current section, 'close' it by # popping the stack elif matchinfo['tag_type'] == '/': if matchinfo['tag_key'] == state.section(): state.section.pop() else: raise Exception( 'Unexpected section end: received %s, expected {{/%s}}' % ( repr(match.group(0)), tag_key)) search_index = matchinfo['tag_end'] return matchinfo
python
def section_end_info(template, tag_key, state, index): """ Given the tag key of an opening section tag, find the corresponding closing tag (if it exists) and return information about that match. """ state.section.push(tag_key) match = None matchinfo = None search_index = index while state.section: match = state.tag_re.search(template, search_index) if not match: raise Exception("Open section %s never closed" % tag_key) matchinfo = get_match_info(template, match, state) # If we find a new section tag, add it to the stack and keep going if matchinfo['tag_type'] in ('#', '^'): state.section.push(matchinfo['tag_key']) # If we find a closing tag for the current section, 'close' it by # popping the stack elif matchinfo['tag_type'] == '/': if matchinfo['tag_key'] == state.section(): state.section.pop() else: raise Exception( 'Unexpected section end: received %s, expected {{/%s}}' % ( repr(match.group(0)), tag_key)) search_index = matchinfo['tag_end'] return matchinfo
[ "def", "section_end_info", "(", "template", ",", "tag_key", ",", "state", ",", "index", ")", ":", "state", ".", "section", ".", "push", "(", "tag_key", ")", "match", "=", "None", "matchinfo", "=", "None", "search_index", "=", "index", "while", "state", "...
Given the tag key of an opening section tag, find the corresponding closing tag (if it exists) and return information about that match.
[ "Given", "the", "tag", "key", "of", "an", "opening", "section", "tag", "find", "the", "corresponding", "closing", "tag", "(", "if", "it", "exists", ")", "and", "return", "information", "about", "that", "match", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L95-L127
peterldowns/python-mustache
mustache/rendering.py
render
def render(template, context, partials={}, state=None): """ Renders a given mustache template, with sane defaults. """ # Create a new state by default state = state or State() # Add context to the state dict if isinstance(context, Context): state.context = context else: state.context = Context(context) # Add any partials to the state dict if partials: state.partials.push(partials) # Render the rendered template return __render(make_unicode(template), state)
python
def render(template, context, partials={}, state=None): """ Renders a given mustache template, with sane defaults. """ # Create a new state by default state = state or State() # Add context to the state dict if isinstance(context, Context): state.context = context else: state.context = Context(context) # Add any partials to the state dict if partials: state.partials.push(partials) # Render the rendered template return __render(make_unicode(template), state)
[ "def", "render", "(", "template", ",", "context", ",", "partials", "=", "{", "}", ",", "state", "=", "None", ")", ":", "# Create a new state by default", "state", "=", "state", "or", "State", "(", ")", "# Add context to the state dict", "if", "isinstance", "("...
Renders a given mustache template, with sane defaults.
[ "Renders", "a", "given", "mustache", "template", "with", "sane", "defaults", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L130-L146
peterldowns/python-mustache
mustache/rendering.py
__render
def __render(template, state, index=0): """ Given a /template/ string, a parser /state/, and a starting offset (/index/), return the rendered version of the template. """ # Find a Match match = state.tag_re.search(template, index) if not match: return template[index:] info = get_match_info(template, match, state) _pre = template[index : info['tag_start']] # template before the tag _tag = template[info['tag_start'] : info['tag_end']] # tag _continue = info['tag_end'] # the index at which to continue # Comment if info['tag_type'] == '!': # Comments are removed from output repl = "" # Delimiter change elif info['tag_type'] == '=': # Delimiters are changed; the tag is rendered as "" delimiters = re.split(r'\s*', info['tag_key']) new_tags = state.tags(_copy=True) new_tags['otag'], new_tags['ctag'] = map(re.escape, delimiters) state.push_tags(new_tags) repl = "" # Plain tag elif info['tag_type'] == '': repl = __render_tag(info, state) # Raw tag (should not be escaped) elif info['tag_type'] == '&': state.escape.push(False) repl = __render_tag(info, state) state.escape.pop() # Partial elif info['tag_type'] == '>': partial_name = info['tag_key'] partial_template = None new_dir = None lead_wsp = re.compile(r'^(.)', re.M) repl = '' try: # Cached partial_template = state.partials()[partial_name] except (KeyError, IndexError): try: # Load the partial template from a file (if it exists) new_dir, filename = split(partial_name) if new_dir: state.partials_dir.push(new_dir) partial_template = load_template(filename, state.abs_partials_dir, state.extension, state.encoding, state.encoding_error) except (IOError): pass if partial_template: # Preserve indentation if info['standalone']: partial_template = lead_wsp.sub(info['lead_wsp']+r'\1', partial_template) # Update state state.partials.push(state.partials()) # XXX wtf is this shit? 
state.push_tags(state.default_tags) # Render the partial repl = __render(partial_template, state) # Restore state state.partials.pop() state.pop_tags() if new_dir: state.partials_dir.pop() # Section # TODO(peter): add a stop= index to __render so that template_to_inner does # not need to be constructed with [:] indexing, which is extremely # expensive. elif info['tag_type'] in ('#', '^'): otag_info = info ctag_info = section_end_info(template, info['tag_key'], state, _continue) # Don't want to parse beyond the end of the inner section, but # must include information on prior contents so that whitespace # is preserved correctly and inner tags are not marked as standalone. inner_start = otag_info['tag_end'] inner_end = ctag_info['tag_start'] _continue = ctag_info['tag_end'] template_with_inner = template[:inner_end] new_contexts, ctm = get_tag_context(otag_info['tag_key'], state) truthy = otag_info['tag_type'] == '#' #if ctm is not None: if ctm: # If there's a match and it's callable, feed it the inner template if callable(ctm): template_to_inner = template[:inner_start] inner = template[inner_start:inner_end] template_with_inner = template_to_inner + make_unicode(ctm(inner)) # Make the context list an iterable from the ctm if not hasattr(ctm, '__iter__') or isinstance(ctm, dict): ctx_list = [ctm] else: ctx_list = ctm # If there's no match, there are no new contexts else: ctx_list = [False] # If there are new contexts and the section is truthy, or if # there are no new contexts and the section is falsy, render # the contents repl_stack = [] for ctx in ctx_list: if (truthy and ctx) or (not truthy and not ctx): state.context.push(ctx) repl_stack.append( __render(template_with_inner, state, inner_start)) else: break repl = ''.join(repl_stack) for i in xrange(new_contexts): state.context.pop() else: raise Exception("found unpaired end of section tag!") return u''.join(( _pre, make_unicode(repl), __render(template, state, _continue)))
python
def __render(template, state, index=0): """ Given a /template/ string, a parser /state/, and a starting offset (/index/), return the rendered version of the template. """ # Find a Match match = state.tag_re.search(template, index) if not match: return template[index:] info = get_match_info(template, match, state) _pre = template[index : info['tag_start']] # template before the tag _tag = template[info['tag_start'] : info['tag_end']] # tag _continue = info['tag_end'] # the index at which to continue # Comment if info['tag_type'] == '!': # Comments are removed from output repl = "" # Delimiter change elif info['tag_type'] == '=': # Delimiters are changed; the tag is rendered as "" delimiters = re.split(r'\s*', info['tag_key']) new_tags = state.tags(_copy=True) new_tags['otag'], new_tags['ctag'] = map(re.escape, delimiters) state.push_tags(new_tags) repl = "" # Plain tag elif info['tag_type'] == '': repl = __render_tag(info, state) # Raw tag (should not be escaped) elif info['tag_type'] == '&': state.escape.push(False) repl = __render_tag(info, state) state.escape.pop() # Partial elif info['tag_type'] == '>': partial_name = info['tag_key'] partial_template = None new_dir = None lead_wsp = re.compile(r'^(.)', re.M) repl = '' try: # Cached partial_template = state.partials()[partial_name] except (KeyError, IndexError): try: # Load the partial template from a file (if it exists) new_dir, filename = split(partial_name) if new_dir: state.partials_dir.push(new_dir) partial_template = load_template(filename, state.abs_partials_dir, state.extension, state.encoding, state.encoding_error) except (IOError): pass if partial_template: # Preserve indentation if info['standalone']: partial_template = lead_wsp.sub(info['lead_wsp']+r'\1', partial_template) # Update state state.partials.push(state.partials()) # XXX wtf is this shit? 
state.push_tags(state.default_tags) # Render the partial repl = __render(partial_template, state) # Restore state state.partials.pop() state.pop_tags() if new_dir: state.partials_dir.pop() # Section # TODO(peter): add a stop= index to __render so that template_to_inner does # not need to be constructed with [:] indexing, which is extremely # expensive. elif info['tag_type'] in ('#', '^'): otag_info = info ctag_info = section_end_info(template, info['tag_key'], state, _continue) # Don't want to parse beyond the end of the inner section, but # must include information on prior contents so that whitespace # is preserved correctly and inner tags are not marked as standalone. inner_start = otag_info['tag_end'] inner_end = ctag_info['tag_start'] _continue = ctag_info['tag_end'] template_with_inner = template[:inner_end] new_contexts, ctm = get_tag_context(otag_info['tag_key'], state) truthy = otag_info['tag_type'] == '#' #if ctm is not None: if ctm: # If there's a match and it's callable, feed it the inner template if callable(ctm): template_to_inner = template[:inner_start] inner = template[inner_start:inner_end] template_with_inner = template_to_inner + make_unicode(ctm(inner)) # Make the context list an iterable from the ctm if not hasattr(ctm, '__iter__') or isinstance(ctm, dict): ctx_list = [ctm] else: ctx_list = ctm # If there's no match, there are no new contexts else: ctx_list = [False] # If there are new contexts and the section is truthy, or if # there are no new contexts and the section is falsy, render # the contents repl_stack = [] for ctx in ctx_list: if (truthy and ctx) or (not truthy and not ctx): state.context.push(ctx) repl_stack.append( __render(template_with_inner, state, inner_start)) else: break repl = ''.join(repl_stack) for i in xrange(new_contexts): state.context.pop() else: raise Exception("found unpaired end of section tag!") return u''.join(( _pre, make_unicode(repl), __render(template, state, _continue)))
[ "def", "__render", "(", "template", ",", "state", ",", "index", "=", "0", ")", ":", "# Find a Match", "match", "=", "state", ".", "tag_re", ".", "search", "(", "template", ",", "index", ")", "if", "not", "match", ":", "return", "template", "[", "index"...
Given a /template/ string, a parser /state/, and a starting offset (/index/), return the rendered version of the template.
[ "Given", "a", "/", "template", "/", "string", "a", "parser", "/", "state", "/", "and", "a", "starting", "offset", "(", "/", "index", "/", ")", "return", "the", "rendered", "version", "of", "the", "template", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L148-L284
peterldowns/python-mustache
mustache/rendering.py
__render_tag
def __render_tag(info, state): """ Render an individual tag by making the appropriate replacement within the current context (if any). """ new_contexts, context_match = get_tag_context(info['tag_key'], state) replacement = '' if context_match or context_match == 0: replacement = context_match elif info['tag_key'] == '.': replacement = state.context() else: replacement = '' # Call all callables / methods / lambdas / functions if replacement and callable(replacement): replacement = make_unicode(replacement()) state.push_tags(state.default_tags) replacement = __render(template=replacement, state=state) state.pop_tags() for i in xrange(new_contexts): state.context.pop() if state.escape(): return html_escape(replacement) return replacement
python
def __render_tag(info, state): """ Render an individual tag by making the appropriate replacement within the current context (if any). """ new_contexts, context_match = get_tag_context(info['tag_key'], state) replacement = '' if context_match or context_match == 0: replacement = context_match elif info['tag_key'] == '.': replacement = state.context() else: replacement = '' # Call all callables / methods / lambdas / functions if replacement and callable(replacement): replacement = make_unicode(replacement()) state.push_tags(state.default_tags) replacement = __render(template=replacement, state=state) state.pop_tags() for i in xrange(new_contexts): state.context.pop() if state.escape(): return html_escape(replacement) return replacement
[ "def", "__render_tag", "(", "info", ",", "state", ")", ":", "new_contexts", ",", "context_match", "=", "get_tag_context", "(", "info", "[", "'tag_key'", "]", ",", "state", ")", "replacement", "=", "''", "if", "context_match", "or", "context_match", "==", "0"...
Render an individual tag by making the appropriate replacement within the current context (if any).
[ "Render", "an", "individual", "tag", "by", "making", "the", "appropriate", "replacement", "within", "the", "current", "context", "(", "if", "any", ")", "." ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/rendering.py#L286-L311
SylvanasSun/FishFishJump
fish_core/utils/common_utils.py
format_dict_to_str
def format_dict_to_str(dict, format): """ Format a dictionary to the string, param format is a specified format rule such as dict = '{'name':'Sylvanas', 'gender':'Boy'}' format = '-' so result is 'name-Sylvanas, gender-Boy'. >>> dict = {'name': 'Sylvanas', 'gender': 'Boy'} >>> format_dict_to_str(dict, format='-') 'name-Sylvanas, gender-Boy' """ result = '' for k, v in dict.items(): result = result + str(k) + format + str(v) + ', ' return result[:-2]
python
def format_dict_to_str(dict, format): """ Format a dictionary to the string, param format is a specified format rule such as dict = '{'name':'Sylvanas', 'gender':'Boy'}' format = '-' so result is 'name-Sylvanas, gender-Boy'. >>> dict = {'name': 'Sylvanas', 'gender': 'Boy'} >>> format_dict_to_str(dict, format='-') 'name-Sylvanas, gender-Boy' """ result = '' for k, v in dict.items(): result = result + str(k) + format + str(v) + ', ' return result[:-2]
[ "def", "format_dict_to_str", "(", "dict", ",", "format", ")", ":", "result", "=", "''", "for", "k", ",", "v", "in", "dict", ".", "items", "(", ")", ":", "result", "=", "result", "+", "str", "(", "k", ")", "+", "format", "+", "str", "(", "v", ")...
Format a dictionary to the string, param format is a specified format rule such as dict = '{'name':'Sylvanas', 'gender':'Boy'}' format = '-' so result is 'name-Sylvanas, gender-Boy'. >>> dict = {'name': 'Sylvanas', 'gender': 'Boy'} >>> format_dict_to_str(dict, format='-') 'name-Sylvanas, gender-Boy'
[ "Format", "a", "dictionary", "to", "the", "string", "param", "format", "is", "a", "specified", "format", "rule", "such", "as", "dict", "=", "{", "name", ":", "Sylvanas", "gender", ":", "Boy", "}", "format", "=", "-", "so", "result", "is", "name", "-", ...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/utils/common_utils.py#L6-L19
SylvanasSun/FishFishJump
fish_core/utils/common_utils.py
list_to_str
def list_to_str(list, separator=','): """ >>> list = [0, 0, 7] >>> list_to_str(list) '0,0,7' """ list = [str(x) for x in list] return separator.join(list)
python
def list_to_str(list, separator=','): """ >>> list = [0, 0, 7] >>> list_to_str(list) '0,0,7' """ list = [str(x) for x in list] return separator.join(list)
[ "def", "list_to_str", "(", "list", ",", "separator", "=", "','", ")", ":", "list", "=", "[", "str", "(", "x", ")", "for", "x", "in", "list", "]", "return", "separator", ".", "join", "(", "list", ")" ]
>>> list = [0, 0, 7] >>> list_to_str(list) '0,0,7'
[ ">>>", "list", "=", "[", "0", "0", "7", "]", ">>>", "list_to_str", "(", "list", ")", "0", "0", "7" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/utils/common_utils.py#L26-L33
SylvanasSun/FishFishJump
fish_core/utils/common_utils.py
unite_dict
def unite_dict(a, b): """ >>> a = {'name': 'Sylvanas'} >>> b = {'gender': 'Man'} >>> unite_dict(a, b) {'name': 'Sylvanas', 'gender': 'Man'} """ c = {} c.update(a) c.update(b) return c
python
def unite_dict(a, b): """ >>> a = {'name': 'Sylvanas'} >>> b = {'gender': 'Man'} >>> unite_dict(a, b) {'name': 'Sylvanas', 'gender': 'Man'} """ c = {} c.update(a) c.update(b) return c
[ "def", "unite_dict", "(", "a", ",", "b", ")", ":", "c", "=", "{", "}", "c", ".", "update", "(", "a", ")", "c", ".", "update", "(", "b", ")", "return", "c" ]
>>> a = {'name': 'Sylvanas'} >>> b = {'gender': 'Man'} >>> unite_dict(a, b) {'name': 'Sylvanas', 'gender': 'Man'}
[ ">>>", "a", "=", "{", "name", ":", "Sylvanas", "}", ">>>", "b", "=", "{", "gender", ":", "Man", "}", ">>>", "unite_dict", "(", "a", "b", ")", "{", "name", ":", "Sylvanas", "gender", ":", "Man", "}" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/utils/common_utils.py#L45-L55
SylvanasSun/FishFishJump
fish_core/utils/common_utils.py
check_validity_for_dict
def check_validity_for_dict(keys, dict): """ >>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False """ for key in keys: if key not in dict or dict[key] is '' or dict[key] is None: return False return True
python
def check_validity_for_dict(keys, dict): """ >>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False """ for key in keys: if key not in dict or dict[key] is '' or dict[key] is None: return False return True
[ "def", "check_validity_for_dict", "(", "keys", ",", "dict", ")", ":", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "dict", "or", "dict", "[", "key", "]", "is", "''", "or", "dict", "[", "key", "]", "is", "None", ":", "return", "False",...
>>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False
[ ">>>", "dict", "=", "{", "a", ":", "0", "b", ":", "1", "c", ":", "2", "}", ">>>", "keys", "=", "[", "a", "d", "e", "]", ">>>", "check_validity_for_dict", "(", "keys", "dict", ")", "==", "False", "True", ">>>", "keys", "=", "[", "a", "b", "c",...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/utils/common_utils.py#L58-L71
frnsys/broca
broca/common/util.py
parallel
def parallel(func, inputs, n_jobs, expand_args=False): """ Convenience wrapper around joblib's parallelization. """ if expand_args: return Parallel(n_jobs=n_jobs)(delayed(func)(*args) for args in inputs) else: return Parallel(n_jobs=n_jobs)(delayed(func)(arg) for arg in inputs)
python
def parallel(func, inputs, n_jobs, expand_args=False): """ Convenience wrapper around joblib's parallelization. """ if expand_args: return Parallel(n_jobs=n_jobs)(delayed(func)(*args) for args in inputs) else: return Parallel(n_jobs=n_jobs)(delayed(func)(arg) for arg in inputs)
[ "def", "parallel", "(", "func", ",", "inputs", ",", "n_jobs", ",", "expand_args", "=", "False", ")", ":", "if", "expand_args", ":", "return", "Parallel", "(", "n_jobs", "=", "n_jobs", ")", "(", "delayed", "(", "func", ")", "(", "*", "args", ")", "for...
Convenience wrapper around joblib's parallelization.
[ "Convenience", "wrapper", "around", "joblib", "s", "parallelization", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/common/util.py#L59-L66
davebridges/mousedb
mousedb/timed_mating/views.py
breeding_plugevent
def breeding_plugevent(request, breeding_id): """This view defines a form for adding new plug events from a breeding cage. This form requires a breeding_id from a breeding set and restricts the PlugFemale and PlugMale to animals that are defined in that breeding cage.""" breeding = get_object_or_404(Breeding, pk=breeding_id) if request.method == "POST": form = BreedingPlugForm(request.POST, request.FILES) if form.is_valid(): plug = form.save(commit=False) plug.Breeding_id = breeding.id plug.save() form.save() return HttpResponseRedirect(reverse("plugevents-list")) else: form = BreedingPlugForm() form.fields["PlugFemale"].queryset = breeding.Females.all() form.fields["PlugMale"].queryset = breeding.Male.all() return render(request, 'breeding_plugevent_form.html', {'form':form, 'breeding':breeding})
python
def breeding_plugevent(request, breeding_id): """This view defines a form for adding new plug events from a breeding cage. This form requires a breeding_id from a breeding set and restricts the PlugFemale and PlugMale to animals that are defined in that breeding cage.""" breeding = get_object_or_404(Breeding, pk=breeding_id) if request.method == "POST": form = BreedingPlugForm(request.POST, request.FILES) if form.is_valid(): plug = form.save(commit=False) plug.Breeding_id = breeding.id plug.save() form.save() return HttpResponseRedirect(reverse("plugevents-list")) else: form = BreedingPlugForm() form.fields["PlugFemale"].queryset = breeding.Females.all() form.fields["PlugMale"].queryset = breeding.Male.all() return render(request, 'breeding_plugevent_form.html', {'form':form, 'breeding':breeding})
[ "def", "breeding_plugevent", "(", "request", ",", "breeding_id", ")", ":", "breeding", "=", "get_object_or_404", "(", "Breeding", ",", "pk", "=", "breeding_id", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "form", "=", "BreedingPlugForm", "(", ...
This view defines a form for adding new plug events from a breeding cage. This form requires a breeding_id from a breeding set and restricts the PlugFemale and PlugMale to animals that are defined in that breeding cage.
[ "This", "view", "defines", "a", "form", "for", "adding", "new", "plug", "events", "from", "a", "breeding", "cage", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/timed_mating/views.py#L83-L100
davebridges/mousedb
mousedb/timed_mating/views.py
PlugEventsListStrain.get_queryset
def get_queryset(self): """The queryset is over-ridden to show only plug events in which the strain matches the breeding strain.""" self.strain = get_object_or_404(Strain, Strain_slug__iexact=self.kwargs['slug']) return PlugEvents.objects.filter(Breeding__Strain=self.strain)
python
def get_queryset(self): """The queryset is over-ridden to show only plug events in which the strain matches the breeding strain.""" self.strain = get_object_or_404(Strain, Strain_slug__iexact=self.kwargs['slug']) return PlugEvents.objects.filter(Breeding__Strain=self.strain)
[ "def", "get_queryset", "(", "self", ")", ":", "self", ".", "strain", "=", "get_object_or_404", "(", "Strain", ",", "Strain_slug__iexact", "=", "self", ".", "kwargs", "[", "'slug'", "]", ")", "return", "PlugEvents", ".", "objects", ".", "filter", "(", "Bree...
The queryset is over-ridden to show only plug events in which the strain matches the breeding strain.
[ "The", "queryset", "is", "over", "-", "ridden", "to", "show", "only", "plug", "events", "in", "which", "the", "strain", "matches", "the", "breeding", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/timed_mating/views.py#L40-L43
darkfeline/animanager
animanager/commands/fix.py
command
def command(state, args): """Fix cache issues caused by schema pre-v4.""" if len(args) > 1: print(f'Usage: {args[0]}') return db = state.db _refresh_incomplete_anime(db) _fix_cached_completed(db)
python
def command(state, args): """Fix cache issues caused by schema pre-v4.""" if len(args) > 1: print(f'Usage: {args[0]}') return db = state.db _refresh_incomplete_anime(db) _fix_cached_completed(db)
[ "def", "command", "(", "state", ",", "args", ")", ":", "if", "len", "(", "args", ")", ">", "1", ":", "print", "(", "f'Usage: {args[0]}'", ")", "return", "db", "=", "state", ".", "db", "_refresh_incomplete_anime", "(", "db", ")", "_fix_cached_completed", ...
Fix cache issues caused by schema pre-v4.
[ "Fix", "cache", "issues", "caused", "by", "schema", "pre", "-", "v4", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/fix.py#L24-L31
gkmngrgn/radpress
radpress/readers/markdown_reader.py
Reader.read
def read(self): """Parse content and metadata of markdown files""" self.convertRSTmetaToMD() self._md = Markdown(extensions=['meta', 'codehilite(linenums=True)']) content = self._md.convert(self.source) metadata = self._parse_metadata(self._md.Meta) return content, metadata
python
def read(self): """Parse content and metadata of markdown files""" self.convertRSTmetaToMD() self._md = Markdown(extensions=['meta', 'codehilite(linenums=True)']) content = self._md.convert(self.source) metadata = self._parse_metadata(self._md.Meta) return content, metadata
[ "def", "read", "(", "self", ")", ":", "self", ".", "convertRSTmetaToMD", "(", ")", "self", ".", "_md", "=", "Markdown", "(", "extensions", "=", "[", "'meta'", ",", "'codehilite(linenums=True)'", "]", ")", "content", "=", "self", ".", "_md", ".", "convert...
Parse content and metadata of markdown files
[ "Parse", "content", "and", "metadata", "of", "markdown", "files" ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/readers/markdown_reader.py#L94-L100
Frojd/Fabrik
fabrik/ext/postgres.py
backup_db
def backup_db(release=None, limit=5): """ Backup database and associate it with current release """ assert "psql_user" in env, "Missing psql_user in env" assert "psql_db" in env, "Missing psql_db in env" assert "psql_password" in env, "Missing psql_password in env" if not release: release = paths.get_current_release_name() max_versions = limit+1 if not release: logger.info("No releases present, skipping task") return remote_file = "postgresql/%s.sql.tar.gz" % release remote_path = paths.get_backup_path(remote_file) env.run("mkdir -p %s" % paths.get_backup_path("postgresql")) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( remote_path, env.psql_user, env.psql_db )) # Remove older releases env.run("ls -dt %s/* | tail -n +%s | xargs rm -rf" % ( paths.get_backup_path("postgresql"), max_versions) )
python
def backup_db(release=None, limit=5): """ Backup database and associate it with current release """ assert "psql_user" in env, "Missing psql_user in env" assert "psql_db" in env, "Missing psql_db in env" assert "psql_password" in env, "Missing psql_password in env" if not release: release = paths.get_current_release_name() max_versions = limit+1 if not release: logger.info("No releases present, skipping task") return remote_file = "postgresql/%s.sql.tar.gz" % release remote_path = paths.get_backup_path(remote_file) env.run("mkdir -p %s" % paths.get_backup_path("postgresql")) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( remote_path, env.psql_user, env.psql_db )) # Remove older releases env.run("ls -dt %s/* | tail -n +%s | xargs rm -rf" % ( paths.get_backup_path("postgresql"), max_versions) )
[ "def", "backup_db", "(", "release", "=", "None", ",", "limit", "=", "5", ")", ":", "assert", "\"psql_user\"", "in", "env", ",", "\"Missing psql_user in env\"", "assert", "\"psql_db\"", "in", "env", ",", "\"Missing psql_db in env\"", "assert", "\"psql_password\"", ...
Backup database and associate it with current release
[ "Backup", "database", "and", "associate", "it", "with", "current", "release" ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/postgres.py#L50-L82
Frojd/Fabrik
fabrik/ext/postgres.py
restore_db
def restore_db(release=None): """ Restores backup back to version, uses current version by default. """ if not release: release = paths.get_current_release_name() if not release: raise Exception("Release %s was not found" % release) backup_file = "postgresql/%s.sql.gz" % release backup_path = paths.get_backup_path(backup_file) if not env.exists(backup_path): raise Exception("Backup file %s not found" % backup_path) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, backup_path) )
python
def restore_db(release=None): """ Restores backup back to version, uses current version by default. """ if not release: release = paths.get_current_release_name() if not release: raise Exception("Release %s was not found" % release) backup_file = "postgresql/%s.sql.gz" % release backup_path = paths.get_backup_path(backup_file) if not env.exists(backup_path): raise Exception("Backup file %s not found" % backup_path) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, backup_path) )
[ "def", "restore_db", "(", "release", "=", "None", ")", ":", "if", "not", "release", ":", "release", "=", "paths", ".", "get_current_release_name", "(", ")", "if", "not", "release", ":", "raise", "Exception", "(", "\"Release %s was not found\"", "%", "release",...
Restores backup back to version, uses current version by default.
[ "Restores", "backup", "back", "to", "version", "uses", "current", "version", "by", "default", "." ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/postgres.py#L86-L108
Frojd/Fabrik
fabrik/ext/postgres.py
sync_local_to_remote
def sync_local_to_remote(force="no"): """ Sync your local postgres database with remote Example: fabrik prod sync_local_to_remote:force=yes """ _check_requirements() if force != "yes": message = "This will replace the remote database '%s' with your "\ "local '%s', are you sure [y/n]" % (env.psql_db, env.local_psql_db) answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik # Create database dump local_file = "sync_%s.sql.tar.gz" % int(time.time()*1000) local_path = "/tmp/%s" % local_file with context_managers.shell_env(PGPASSWORD=env.local_psql_password): elocal("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( local_path, env.local_psql_user, env.local_psql_db )) remote_path = "/tmp/%s" % local_file # Upload sync file put(remote_path, local_path) # Import sync file by performing the following task (drop, create, import) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, remote_path) ) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path) # Trigger hook run_hook("postgres.after_sync_local_to_remote") logger.info("Sync complete")
python
def sync_local_to_remote(force="no"): """ Sync your local postgres database with remote Example: fabrik prod sync_local_to_remote:force=yes """ _check_requirements() if force != "yes": message = "This will replace the remote database '%s' with your "\ "local '%s', are you sure [y/n]" % (env.psql_db, env.local_psql_db) answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik # Create database dump local_file = "sync_%s.sql.tar.gz" % int(time.time()*1000) local_path = "/tmp/%s" % local_file with context_managers.shell_env(PGPASSWORD=env.local_psql_password): elocal("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( local_path, env.local_psql_user, env.local_psql_db )) remote_path = "/tmp/%s" % local_file # Upload sync file put(remote_path, local_path) # Import sync file by performing the following task (drop, create, import) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.psql_db, env.psql_user, remote_path) ) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path) # Trigger hook run_hook("postgres.after_sync_local_to_remote") logger.info("Sync complete")
[ "def", "sync_local_to_remote", "(", "force", "=", "\"no\"", ")", ":", "_check_requirements", "(", ")", "if", "force", "!=", "\"yes\"", ":", "message", "=", "\"This will replace the remote database '%s' with your \"", "\"local '%s', are you sure [y/n]\"", "%", "(", "env", ...
Sync your local postgres database with remote Example: fabrik prod sync_local_to_remote:force=yes
[ "Sync", "your", "local", "postgres", "database", "with", "remote" ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/postgres.py#L112-L162
Frojd/Fabrik
fabrik/ext/postgres.py
sync_remote_to_local
def sync_remote_to_local(force="no"): """ Sync your remote postgres database with local Example: fabrik prod sync_remote_to_local """ _check_requirements() if force != "yes": message = "This will replace your local database '%s' with the "\ "remote '%s', are you sure [y/n]" % (env.local_psql_db, env.psql_db) answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik # Create database dump remote_file = "postgresql/sync_%s.sql.tar.gz" % int(time.time()*1000) remote_path = paths.get_backup_path(remote_file) env.run("mkdir -p %s" % paths.get_backup_path("postgresql")) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( remote_path, env.psql_user, env.psql_db )) local_path = "/tmp/%s" % remote_file # Download sync file get(remote_path, local_path) # Import sync file by performing the following task (drop, create, import) with context_managers.shell_env(PGPASSWORD=env.local_psql_password): elocal("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.local_psql_db, env.local_psql_user, local_path) ) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path) # Trigger hook run_hook("postgres.after_sync_remote_to_local") logger.info("Sync complete")
python
def sync_remote_to_local(force="no"): """ Sync your remote postgres database with local Example: fabrik prod sync_remote_to_local """ _check_requirements() if force != "yes": message = "This will replace your local database '%s' with the "\ "remote '%s', are you sure [y/n]" % (env.local_psql_db, env.psql_db) answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik # Create database dump remote_file = "postgresql/sync_%s.sql.tar.gz" % int(time.time()*1000) remote_path = paths.get_backup_path(remote_file) env.run("mkdir -p %s" % paths.get_backup_path("postgresql")) with context_managers.shell_env(PGPASSWORD=env.psql_password): env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % ( remote_path, env.psql_user, env.psql_db )) local_path = "/tmp/%s" % remote_file # Download sync file get(remote_path, local_path) # Import sync file by performing the following task (drop, create, import) with context_managers.shell_env(PGPASSWORD=env.local_psql_password): elocal("pg_restore --clean -h localhost -d %s -U %s '%s'" % ( env.local_psql_db, env.local_psql_user, local_path) ) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path) # Trigger hook run_hook("postgres.after_sync_remote_to_local") logger.info("Sync complete")
[ "def", "sync_remote_to_local", "(", "force", "=", "\"no\"", ")", ":", "_check_requirements", "(", ")", "if", "force", "!=", "\"yes\"", ":", "message", "=", "\"This will replace your local database '%s' with the \"", "\"remote '%s', are you sure [y/n]\"", "%", "(", "env", ...
Sync your remote postgres database with local Example: fabrik prod sync_remote_to_local
[ "Sync", "your", "remote", "postgres", "database", "with", "local" ]
train
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/postgres.py#L166-L218
gkmngrgn/radpress
radpress/compat.py
get_user_model
def get_user_model(): """ Returns the user model to use at runtime. :return: User or custom user """ if DJANGO_VERSION >= (1, 5): from django.contrib.auth import get_user_model return get_user_model() # NOQA else: from django.contrib.auth.models import User # NOQA return User
python
def get_user_model(): """ Returns the user model to use at runtime. :return: User or custom user """ if DJANGO_VERSION >= (1, 5): from django.contrib.auth import get_user_model return get_user_model() # NOQA else: from django.contrib.auth.models import User # NOQA return User
[ "def", "get_user_model", "(", ")", ":", "if", "DJANGO_VERSION", ">=", "(", "1", ",", "5", ")", ":", "from", "django", ".", "contrib", ".", "auth", "import", "get_user_model", "return", "get_user_model", "(", ")", "# NOQA", "else", ":", "from", "django", ...
Returns the user model to use at runtime. :return: User or custom user
[ "Returns", "the", "user", "model", "to", "use", "at", "runtime", ".", ":", "return", ":", "User", "or", "custom", "user" ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/compat.py#L5-L16
gkmngrgn/radpress
radpress/compat.py
has_app
def has_app(app_name): """ Determines whether an app is listed in INSTALLED_APPS or the app registry. :param app_name: string :return: bool """ if DJANGO_VERSION >= (1, 7): from django.apps import apps return apps.is_installed(app_name) else: from django.conf import settings return app_name in settings.INSTALLED_APPS
python
def has_app(app_name): """ Determines whether an app is listed in INSTALLED_APPS or the app registry. :param app_name: string :return: bool """ if DJANGO_VERSION >= (1, 7): from django.apps import apps return apps.is_installed(app_name) else: from django.conf import settings return app_name in settings.INSTALLED_APPS
[ "def", "has_app", "(", "app_name", ")", ":", "if", "DJANGO_VERSION", ">=", "(", "1", ",", "7", ")", ":", "from", "django", ".", "apps", "import", "apps", "return", "apps", ".", "is_installed", "(", "app_name", ")", "else", ":", "from", "django", ".", ...
Determines whether an app is listed in INSTALLED_APPS or the app registry. :param app_name: string :return: bool
[ "Determines", "whether", "an", "app", "is", "listed", "in", "INSTALLED_APPS", "or", "the", "app", "registry", ".", ":", "param", "app_name", ":", "string", ":", "return", ":", "bool" ]
train
https://github.com/gkmngrgn/radpress/blob/2ed3b97f94e722479601832ffc40ea2135cda916/radpress/compat.py#L30-L42
wglass/lighthouse
lighthouse/checks/tcp.py
TCPCheck.apply_check_config
def apply_check_config(self, config): """ Takes the `query` and `response` fields from a validated config dictionary and sets the proper instance attributes. """ self.query = config.get("query") self.expected_response = config.get("response")
python
def apply_check_config(self, config): """ Takes the `query` and `response` fields from a validated config dictionary and sets the proper instance attributes. """ self.query = config.get("query") self.expected_response = config.get("response")
[ "def", "apply_check_config", "(", "self", ",", "config", ")", ":", "self", ".", "query", "=", "config", ".", "get", "(", "\"query\"", ")", "self", ".", "expected_response", "=", "config", ".", "get", "(", "\"response\"", ")" ]
Takes the `query` and `response` fields from a validated config dictionary and sets the proper instance attributes.
[ "Takes", "the", "query", "and", "response", "fields", "from", "a", "validated", "config", "dictionary", "and", "sets", "the", "proper", "instance", "attributes", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/tcp.py#L46-L52
wglass/lighthouse
lighthouse/checks/tcp.py
TCPCheck.perform
def perform(self): """ Performs a straightforward TCP request and response. Sends the TCP `query` to the proper host and port, and loops over the socket, gathering response chunks until a full line is acquired. If the response line matches the expected value, the check passes. If not, the check fails. The check will also fail if there's an error during any step of the send/receive process. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) # if no query/response is defined, a successful connection is a pass if not self.query: sock.close() return True try: sock.sendall(self.query) except Exception: logger.exception("Error sending TCP query message.") sock.close() return False response, extra = sockutils.get_response(sock) logger.debug("response: %s (extra: %s)", response, extra) if response != self.expected_response: logger.warn( "Response does not match expected value: %s (expected %s)", response, self.expected_response ) sock.close() return False sock.close() return True
python
def perform(self): """ Performs a straightforward TCP request and response. Sends the TCP `query` to the proper host and port, and loops over the socket, gathering response chunks until a full line is acquired. If the response line matches the expected value, the check passes. If not, the check fails. The check will also fail if there's an error during any step of the send/receive process. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) # if no query/response is defined, a successful connection is a pass if not self.query: sock.close() return True try: sock.sendall(self.query) except Exception: logger.exception("Error sending TCP query message.") sock.close() return False response, extra = sockutils.get_response(sock) logger.debug("response: %s (extra: %s)", response, extra) if response != self.expected_response: logger.warn( "Response does not match expected value: %s (expected %s)", response, self.expected_response ) sock.close() return False sock.close() return True
[ "def", "perform", "(", "self", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "# if n...
Performs a straightforward TCP request and response. Sends the TCP `query` to the proper host and port, and loops over the socket, gathering response chunks until a full line is acquired. If the response line matches the expected value, the check passes. If not, the check fails. The check will also fail if there's an error during any step of the send/receive process.
[ "Performs", "a", "straightforward", "TCP", "request", "and", "response", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/tcp.py#L54-L94
caesar0301/relogger
relogger/config_parser.py
RLConfig._get_section_values
def _get_section_values(self, config, section): """ extract src and dst values from a section """ src_host = self._get_hosts_from_names(config.get(section, 'src.host')) \ if config.has_option(section, 'src.host') else None src_file = [self._get_abs_filepath(config.get(section, 'src.file'))] \ if config.has_option(section, 'src.file') else None if src_host is None and src_file is None: raise conferr('Section "%s" gets no sources' % section) dst_host = self._get_hosts_from_names(config.get(section, 'dst.host')) \ if config.has_option(section, 'dst.host') else None dst_file = [self._get_abs_filepath(config.get(section, 'dst.file'))] \ if config.has_option(section, 'dst.file') else None if dst_host is None and dst_file is None: raise conferr('Section "%s" gets no destinations' % section) return (src_host, src_file, dst_host, dst_file)
python
def _get_section_values(self, config, section): """ extract src and dst values from a section """ src_host = self._get_hosts_from_names(config.get(section, 'src.host')) \ if config.has_option(section, 'src.host') else None src_file = [self._get_abs_filepath(config.get(section, 'src.file'))] \ if config.has_option(section, 'src.file') else None if src_host is None and src_file is None: raise conferr('Section "%s" gets no sources' % section) dst_host = self._get_hosts_from_names(config.get(section, 'dst.host')) \ if config.has_option(section, 'dst.host') else None dst_file = [self._get_abs_filepath(config.get(section, 'dst.file'))] \ if config.has_option(section, 'dst.file') else None if dst_host is None and dst_file is None: raise conferr('Section "%s" gets no destinations' % section) return (src_host, src_file, dst_host, dst_file)
[ "def", "_get_section_values", "(", "self", ",", "config", ",", "section", ")", ":", "src_host", "=", "self", ".", "_get_hosts_from_names", "(", "config", ".", "get", "(", "section", ",", "'src.host'", ")", ")", "if", "config", ".", "has_option", "(", "sect...
extract src and dst values from a section
[ "extract", "src", "and", "dst", "values", "from", "a", "section" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L118-L135
caesar0301/relogger
relogger/config_parser.py
RLConfig._assemble_flowtable
def _assemble_flowtable(self, values): """ generate a flowtable from a tuple of descriptors. """ values = map(lambda x: [] if x is None else x, values) src = values[0] + values[1] dst = values[2] + values[3] thistable = dict() for s in src: thistable[s] = dst return thistable
python
def _assemble_flowtable(self, values): """ generate a flowtable from a tuple of descriptors. """ values = map(lambda x: [] if x is None else x, values) src = values[0] + values[1] dst = values[2] + values[3] thistable = dict() for s in src: thistable[s] = dst return thistable
[ "def", "_assemble_flowtable", "(", "self", ",", "values", ")", ":", "values", "=", "map", "(", "lambda", "x", ":", "[", "]", "if", "x", "is", "None", "else", "x", ",", "values", ")", "src", "=", "values", "[", "0", "]", "+", "values", "[", "1", ...
generate a flowtable from a tuple of descriptors.
[ "generate", "a", "flowtable", "from", "a", "tuple", "of", "descriptors", "." ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L137-L147
caesar0301/relogger
relogger/config_parser.py
RLConfig._detect_loop
def _detect_loop(self): """ detect loops in flow table, raise error if being present """ for source, dests in self.flowtable.items(): if source in dests: raise conferr('Loops detected: %s --> %s' % (source, source))
python
def _detect_loop(self): """ detect loops in flow table, raise error if being present """ for source, dests in self.flowtable.items(): if source in dests: raise conferr('Loops detected: %s --> %s' % (source, source))
[ "def", "_detect_loop", "(", "self", ")", ":", "for", "source", ",", "dests", "in", "self", ".", "flowtable", ".", "items", "(", ")", ":", "if", "source", "in", "dests", ":", "raise", "conferr", "(", "'Loops detected: %s --> %s'", "%", "(", "source", ",",...
detect loops in flow table, raise error if being present
[ "detect", "loops", "in", "flow", "table", "raise", "error", "if", "being", "present" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L149-L154
caesar0301/relogger
relogger/config_parser.py
RLConfig._get_hosts_from_ports
def _get_hosts_from_ports(self, ports): """ validate hostnames from a list of ports """ hosts = map(lambda x: 'localhost:%d' % int(x.strip()), ports.split(',')) return list(set(hosts))
python
def _get_hosts_from_ports(self, ports): """ validate hostnames from a list of ports """ hosts = map(lambda x: 'localhost:%d' % int(x.strip()), ports.split(',')) return list(set(hosts))
[ "def", "_get_hosts_from_ports", "(", "self", ",", "ports", ")", ":", "hosts", "=", "map", "(", "lambda", "x", ":", "'localhost:%d'", "%", "int", "(", "x", ".", "strip", "(", ")", ")", ",", "ports", ".", "split", "(", "','", ")", ")", "return", "lis...
validate hostnames from a list of ports
[ "validate", "hostnames", "from", "a", "list", "of", "ports" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L156-L160
caesar0301/relogger
relogger/config_parser.py
RLConfig._get_hosts_from_names
def _get_hosts_from_names(self, names): """ validate hostnames from a list of names """ result = set() hosts = map(lambda x: x.strip(), names.split(',')) for h in hosts: if valid_hostname(h.split(':')[0]): result.add(h if ':' in h else '%s:%d' % (h, self.PORT)) else: raise conferr('Invalid hostname: %s' % h.split(':')[0]) return list(result)
python
def _get_hosts_from_names(self, names): """ validate hostnames from a list of names """ result = set() hosts = map(lambda x: x.strip(), names.split(',')) for h in hosts: if valid_hostname(h.split(':')[0]): result.add(h if ':' in h else '%s:%d' % (h, self.PORT)) else: raise conferr('Invalid hostname: %s' % h.split(':')[0]) return list(result)
[ "def", "_get_hosts_from_names", "(", "self", ",", "names", ")", ":", "result", "=", "set", "(", ")", "hosts", "=", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "names", ".", "split", "(", "','", ")", ")", "for", "h", "in", ...
validate hostnames from a list of names
[ "validate", "hostnames", "from", "a", "list", "of", "names" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L162-L172
caesar0301/relogger
relogger/config_parser.py
RLConfig._get_abs_filepath
def _get_abs_filepath(self, ifile): """ validate src or dst file path with self.config_file """ assert ifile is not None ifile = ifile[7:] if ifile.startswith('file://') else ifile if ifile[0] != '/': basedir = os.path.abspath(os.path.dirname(self.config_file)) ifile = os.path.join(basedir, ifile) return 'file://' + ifile
python
def _get_abs_filepath(self, ifile): """ validate src or dst file path with self.config_file """ assert ifile is not None ifile = ifile[7:] if ifile.startswith('file://') else ifile if ifile[0] != '/': basedir = os.path.abspath(os.path.dirname(self.config_file)) ifile = os.path.join(basedir, ifile) return 'file://' + ifile
[ "def", "_get_abs_filepath", "(", "self", ",", "ifile", ")", ":", "assert", "ifile", "is", "not", "None", "ifile", "=", "ifile", "[", "7", ":", "]", "if", "ifile", ".", "startswith", "(", "'file://'", ")", "else", "ifile", "if", "ifile", "[", "0", "]"...
validate src or dst file path with self.config_file
[ "validate", "src", "or", "dst", "file", "path", "with", "self", ".", "config_file" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L174-L182
caesar0301/relogger
relogger/config_parser.py
RLConfig.flowtable
def flowtable(self): """ get a flat flow table globally """ ftable = dict() for table in self.flow_table: for k, v in table.items(): if k not in ftable: ftable[k] = set(v) else: [ftable[k].add(i) for i in v] # convert set to list for k in ftable: ftable[k] = list(ftable[k]) return ftable
python
def flowtable(self): """ get a flat flow table globally """ ftable = dict() for table in self.flow_table: for k, v in table.items(): if k not in ftable: ftable[k] = set(v) else: [ftable[k].add(i) for i in v] # convert set to list for k in ftable: ftable[k] = list(ftable[k]) return ftable
[ "def", "flowtable", "(", "self", ")", ":", "ftable", "=", "dict", "(", ")", "for", "table", "in", "self", ".", "flow_table", ":", "for", "k", ",", "v", "in", "table", ".", "items", "(", ")", ":", "if", "k", "not", "in", "ftable", ":", "ftable", ...
get a flat flow table globally
[ "get", "a", "flat", "flow", "table", "globally" ]
train
https://github.com/caesar0301/relogger/blob/40b722ad2115ac6a179e2cc4eb0c88333f5114de/relogger/config_parser.py#L185-L198
darkfeline/animanager
animanager/cmd/results/aid.py
_set_last_aid
def _set_last_aid(func): """Decorator for setting last_aid.""" @functools.wraps(func) def new_func(self, *args, **kwargs): # pylint: disable=missing-docstring aid = func(self, *args, **kwargs) self.last_aid = aid return aid return new_func
python
def _set_last_aid(func): """Decorator for setting last_aid.""" @functools.wraps(func) def new_func(self, *args, **kwargs): # pylint: disable=missing-docstring aid = func(self, *args, **kwargs) self.last_aid = aid return aid return new_func
[ "def", "_set_last_aid", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "new_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=missing-docstring", "aid", "=", "func", "(", "self", ...
Decorator for setting last_aid.
[ "Decorator", "for", "setting", "last_aid", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/cmd/results/aid.py#L46-L54
darkfeline/animanager
animanager/cmd/results/aid.py
AIDResultsManager.parse_aid
def parse_aid(self, text, default_key): """Parse argument text for aid. May retrieve the aid from search result tables as necessary. aresults determines which search results to use by default; True means aresults is the default. The last aid when no aid has been parsed yet is undefined. The accepted formats, in order: Last AID: . Explicit AID: aid:12345 Explicit result number: key:12 Default result number: 12 """ if default_key not in self: raise ResultKeyError(default_key) if text == '.': return self.last_aid elif text.startswith('aid:'): return int(text[len('aid:'):]) if ':' in text: match = self._key_pattern.search(text) if not match: raise InvalidSyntaxError(text) key = match.group(1) number = match.group(2) else: key = default_key number = text try: number = int(number) except ValueError: raise InvalidSyntaxError(number) try: return self[key].get_aid(number) except KeyError: raise ResultKeyError(key) except IndexError: raise ResultNumberError(key, number)
python
def parse_aid(self, text, default_key): """Parse argument text for aid. May retrieve the aid from search result tables as necessary. aresults determines which search results to use by default; True means aresults is the default. The last aid when no aid has been parsed yet is undefined. The accepted formats, in order: Last AID: . Explicit AID: aid:12345 Explicit result number: key:12 Default result number: 12 """ if default_key not in self: raise ResultKeyError(default_key) if text == '.': return self.last_aid elif text.startswith('aid:'): return int(text[len('aid:'):]) if ':' in text: match = self._key_pattern.search(text) if not match: raise InvalidSyntaxError(text) key = match.group(1) number = match.group(2) else: key = default_key number = text try: number = int(number) except ValueError: raise InvalidSyntaxError(number) try: return self[key].get_aid(number) except KeyError: raise ResultKeyError(key) except IndexError: raise ResultNumberError(key, number)
[ "def", "parse_aid", "(", "self", ",", "text", ",", "default_key", ")", ":", "if", "default_key", "not", "in", "self", ":", "raise", "ResultKeyError", "(", "default_key", ")", "if", "text", "==", "'.'", ":", "return", "self", ".", "last_aid", "elif", "tex...
Parse argument text for aid. May retrieve the aid from search result tables as necessary. aresults determines which search results to use by default; True means aresults is the default. The last aid when no aid has been parsed yet is undefined. The accepted formats, in order: Last AID: . Explicit AID: aid:12345 Explicit result number: key:12 Default result number: 12
[ "Parse", "argument", "text", "for", "aid", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/cmd/results/aid.py#L83-L128
peterldowns/python-mustache
mustache/__init__.py
template
def template(relative_path, *args, **kwargs): """ A decorator for easily rendering templates. Use as follows: main.py: from mustache import (template) @template('../tests/static/say_hello.html') def index(): context = {'name' : 'world'} partials = {} return context, partials if __name__=="__main__": print index() static/say_hello.html: <h1> Hello, {{name}}! </h1> from the command line: > python main.py <h1> Hello, world! </h1> """ directory, filename = os.path.split(relative_path) partials_dir = os.path.abspath(directory) name, ext = os.path.splitext(filename) state = State(partials_dir=directory, extension=ext, *args, **kwargs) template = load_template(name, directory, ext, state.encoding, state.encoding_error) def wrapper(fn): def render_template(*args, **kwargs): res = fn(*args, **kwargs) if isinstance(res, tuple): if len(res) == 2: (new_context, partials) = res elif len(res) == 1: (new_context, partials) = (res[0], {}) elif isinstance(res, dict): (new_context, partials) = (res, {}) else: (new_context, partials) = ({}, {}) context = copy(template_globals) context.update(new_context) return render(template, context, partials, state) return render_template return wrapper
python
def template(relative_path, *args, **kwargs): """ A decorator for easily rendering templates. Use as follows: main.py: from mustache import (template) @template('../tests/static/say_hello.html') def index(): context = {'name' : 'world'} partials = {} return context, partials if __name__=="__main__": print index() static/say_hello.html: <h1> Hello, {{name}}! </h1> from the command line: > python main.py <h1> Hello, world! </h1> """ directory, filename = os.path.split(relative_path) partials_dir = os.path.abspath(directory) name, ext = os.path.splitext(filename) state = State(partials_dir=directory, extension=ext, *args, **kwargs) template = load_template(name, directory, ext, state.encoding, state.encoding_error) def wrapper(fn): def render_template(*args, **kwargs): res = fn(*args, **kwargs) if isinstance(res, tuple): if len(res) == 2: (new_context, partials) = res elif len(res) == 1: (new_context, partials) = (res[0], {}) elif isinstance(res, dict): (new_context, partials) = (res, {}) else: (new_context, partials) = ({}, {}) context = copy(template_globals) context.update(new_context) return render(template, context, partials, state) return render_template return wrapper
[ "def", "template", "(", "relative_path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "directory", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "relative_path", ")", "partials_dir", "=", "os", ".", "path", ".", "abspath", "(", ...
A decorator for easily rendering templates. Use as follows: main.py: from mustache import (template) @template('../tests/static/say_hello.html') def index(): context = {'name' : 'world'} partials = {} return context, partials if __name__=="__main__": print index() static/say_hello.html: <h1> Hello, {{name}}! </h1> from the command line: > python main.py <h1> Hello, world! </h1>
[ "A", "decorator", "for", "easily", "rendering", "templates", ".", "Use", "as", "follows", ":" ]
train
https://github.com/peterldowns/python-mustache/blob/ea3753696ea9886b6eb39cc5de27db7054adc069/mustache/__init__.py#L27-L76
darkfeline/animanager
animanager/animecmd.py
_connect
def _connect(dbfile: 'PathLike') -> apsw.Connection: """Connect to SQLite database file.""" conn = apsw.Connection(os.fspath(dbfile)) _set_foreign_keys(conn, 1) assert _get_foreign_keys(conn) == 1 return conn
python
def _connect(dbfile: 'PathLike') -> apsw.Connection: """Connect to SQLite database file.""" conn = apsw.Connection(os.fspath(dbfile)) _set_foreign_keys(conn, 1) assert _get_foreign_keys(conn) == 1 return conn
[ "def", "_connect", "(", "dbfile", ":", "'PathLike'", ")", "->", "apsw", ".", "Connection", ":", "conn", "=", "apsw", ".", "Connection", "(", "os", ".", "fspath", "(", "dbfile", ")", ")", "_set_foreign_keys", "(", "conn", ",", "1", ")", "assert", "_get_...
Connect to SQLite database file.
[ "Connect", "to", "SQLite", "database", "file", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/animecmd.py#L127-L132
darkfeline/animanager
animanager/animecmd.py
AnimeCmd.cmdloop
def cmdloop(self): """Start CLI REPL.""" while True: cmdline = input(self.prompt) tokens = shlex.split(cmdline) if not tokens: if self.last_cmd: tokens = self.last_cmd else: print('No previous command.') continue if tokens[0] not in self.commands: print('Invalid command') continue command = self.commands[tokens[0]] self.last_cmd = tokens try: if command(self.state, tokens): break except CmdExit: continue except Exception as e: if e not in self.safe_exceptions: logger.exception('Error!')
python
def cmdloop(self): """Start CLI REPL.""" while True: cmdline = input(self.prompt) tokens = shlex.split(cmdline) if not tokens: if self.last_cmd: tokens = self.last_cmd else: print('No previous command.') continue if tokens[0] not in self.commands: print('Invalid command') continue command = self.commands[tokens[0]] self.last_cmd = tokens try: if command(self.state, tokens): break except CmdExit: continue except Exception as e: if e not in self.safe_exceptions: logger.exception('Error!')
[ "def", "cmdloop", "(", "self", ")", ":", "while", "True", ":", "cmdline", "=", "input", "(", "self", ".", "prompt", ")", "tokens", "=", "shlex", ".", "split", "(", "cmdline", ")", "if", "not", "tokens", ":", "if", "self", ".", "last_cmd", ":", "tok...
Start CLI REPL.
[ "Start", "CLI", "REPL", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/animecmd.py#L86-L109
ucbvislab/radiotool
radiotool/composer/volume.py
Volume.to_array
def to_array(self, channels=2): """Generate the array of multipliers for the dynamic""" return np.linspace(self.volume, self.volume, self.duration * channels).reshape(self.duration, channels)
python
def to_array(self, channels=2): """Generate the array of multipliers for the dynamic""" return np.linspace(self.volume, self.volume, self.duration * channels).reshape(self.duration, channels)
[ "def", "to_array", "(", "self", ",", "channels", "=", "2", ")", ":", "return", "np", ".", "linspace", "(", "self", ".", "volume", ",", "self", ".", "volume", ",", "self", ".", "duration", "*", "channels", ")", ".", "reshape", "(", "self", ".", "dur...
Generate the array of multipliers for the dynamic
[ "Generate", "the", "array", "of", "multipliers", "for", "the", "dynamic" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/volume.py#L29-L32
davebridges/mousedb
mousedb/data/views.py
add_measurement
def add_measurement(request, experiment_id): """This is a view to display a form to add single measurements to an experiment. It calls the object MeasurementForm, which has an autocomplete field for animal.""" experiment = get_object_or_404(Experiment, pk=experiment_id) if request.method == 'POST': form = MeasurementForm(request.POST) if form.is_valid(): form.save() return HttpResponseRedirect( experiment.get_absolute_url() ) else: form = MeasurementForm() return render(request, "data_entry_form.html", {"form": form, "experiment": experiment })
python
def add_measurement(request, experiment_id): """This is a view to display a form to add single measurements to an experiment. It calls the object MeasurementForm, which has an autocomplete field for animal.""" experiment = get_object_or_404(Experiment, pk=experiment_id) if request.method == 'POST': form = MeasurementForm(request.POST) if form.is_valid(): form.save() return HttpResponseRedirect( experiment.get_absolute_url() ) else: form = MeasurementForm() return render(request, "data_entry_form.html", {"form": form, "experiment": experiment })
[ "def", "add_measurement", "(", "request", ",", "experiment_id", ")", ":", "experiment", "=", "get_object_or_404", "(", "Experiment", ",", "pk", "=", "experiment_id", ")", "if", "request", ".", "method", "==", "'POST'", ":", "form", "=", "MeasurementForm", "(",...
This is a view to display a form to add single measurements to an experiment. It calls the object MeasurementForm, which has an autocomplete field for animal.
[ "This", "is", "a", "view", "to", "display", "a", "form", "to", "add", "single", "measurements", "to", "an", "experiment", ".", "It", "calls", "the", "object", "MeasurementForm", "which", "has", "an", "autocomplete", "field", "for", "animal", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L194-L206
davebridges/mousedb
mousedb/data/views.py
experiment_details_csv
def experiment_details_csv(request, pk): """This view generates a csv output file of an experiment. The view writes to a csv table the animal, genotype, age (in days), assay and values.""" experiment = get_object_or_404(Experiment, pk=pk) response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=experiment.csv' writer = csv.writer(response) writer.writerow(["Animal","Cage", "Strain", "Genotype", "Gender","Age", "Assay", "Values", "Feeding", "Experiment Date", "Treatment"]) for measurement in experiment.measurement_set.iterator(): writer.writerow([ measurement.animal, measurement.animal.Cage, measurement.animal.Strain, measurement.animal.Genotype, measurement.animal.Gender, measurement.age(), measurement.assay, measurement.values, measurement.experiment.feeding_state, measurement.experiment.date, measurement.animal.treatment_set.all() ]) return response
python
def experiment_details_csv(request, pk): """This view generates a csv output file of an experiment. The view writes to a csv table the animal, genotype, age (in days), assay and values.""" experiment = get_object_or_404(Experiment, pk=pk) response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=experiment.csv' writer = csv.writer(response) writer.writerow(["Animal","Cage", "Strain", "Genotype", "Gender","Age", "Assay", "Values", "Feeding", "Experiment Date", "Treatment"]) for measurement in experiment.measurement_set.iterator(): writer.writerow([ measurement.animal, measurement.animal.Cage, measurement.animal.Strain, measurement.animal.Genotype, measurement.animal.Gender, measurement.age(), measurement.assay, measurement.values, measurement.experiment.feeding_state, measurement.experiment.date, measurement.animal.treatment_set.all() ]) return response
[ "def", "experiment_details_csv", "(", "request", ",", "pk", ")", ":", "experiment", "=", "get_object_or_404", "(", "Experiment", ",", "pk", "=", "pk", ")", "response", "=", "HttpResponse", "(", "content_type", "=", "'text/csv'", ")", "response", "[", "'Content...
This view generates a csv output file of an experiment. The view writes to a csv table the animal, genotype, age (in days), assay and values.
[ "This", "view", "generates", "a", "csv", "output", "file", "of", "an", "experiment", ".", "The", "view", "writes", "to", "a", "csv", "table", "the", "animal", "genotype", "age", "(", "in", "days", ")", "assay", "and", "values", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L225-L248
davebridges/mousedb
mousedb/data/views.py
aging_csv
def aging_csv(request): """This view generates a csv output file of all animal data for use in aging analysis. The view writes to a csv table the animal, strain, genotype, age (in days), and cause of death.""" animal_list = Animal.objects.all() response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=aging.csv' writer = csv.writer(response) writer.writerow(["Animal", "Strain", "Genotype", "Gender", "Age", "Death", "Alive"]) for animal in animal_list.iterator(): writer.writerow([ animal.MouseID, animal.Strain, animal.Genotype, animal.Gender, animal.age(), animal.Cause_of_Death, animal.Alive ]) return response
python
def aging_csv(request): """This view generates a csv output file of all animal data for use in aging analysis. The view writes to a csv table the animal, strain, genotype, age (in days), and cause of death.""" animal_list = Animal.objects.all() response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=aging.csv' writer = csv.writer(response) writer.writerow(["Animal", "Strain", "Genotype", "Gender", "Age", "Death", "Alive"]) for animal in animal_list.iterator(): writer.writerow([ animal.MouseID, animal.Strain, animal.Genotype, animal.Gender, animal.age(), animal.Cause_of_Death, animal.Alive ]) return response
[ "def", "aging_csv", "(", "request", ")", ":", "animal_list", "=", "Animal", ".", "objects", ".", "all", "(", ")", "response", "=", "HttpResponse", "(", "content_type", "=", "'text/csv'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; fi...
This view generates a csv output file of all animal data for use in aging analysis. The view writes to a csv table the animal, strain, genotype, age (in days), and cause of death.
[ "This", "view", "generates", "a", "csv", "output", "file", "of", "all", "animal", "data", "for", "use", "in", "aging", "analysis", ".", "The", "view", "writes", "to", "a", "csv", "table", "the", "animal", "strain", "genotype", "age", "(", "in", "days", ...
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L251-L270
davebridges/mousedb
mousedb/data/views.py
litters_csv
def litters_csv(request): """This view generates a csv output file of all animal data for use in litter analysis. The view writes to a csv table the birthdate, breeding cage and strain.""" animal_list = Animal.objects.all() response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=litters.csv' writer = csv.writer(response) writer.writerow(["Born", "Breeding", "Strain"]) for animal in animal_list: writer.writerow([ animal.Born, animal.Breeding, animal.Strain ]) return response
python
def litters_csv(request): """This view generates a csv output file of all animal data for use in litter analysis. The view writes to a csv table the birthdate, breeding cage and strain.""" animal_list = Animal.objects.all() response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=litters.csv' writer = csv.writer(response) writer.writerow(["Born", "Breeding", "Strain"]) for animal in animal_list: writer.writerow([ animal.Born, animal.Breeding, animal.Strain ]) return response
[ "def", "litters_csv", "(", "request", ")", ":", "animal_list", "=", "Animal", ".", "objects", ".", "all", "(", ")", "response", "=", "HttpResponse", "(", "content_type", "=", "'text/csv'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; ...
This view generates a csv output file of all animal data for use in litter analysis. The view writes to a csv table the birthdate, breeding cage and strain.
[ "This", "view", "generates", "a", "csv", "output", "file", "of", "all", "animal", "data", "for", "use", "in", "litter", "analysis", ".", "The", "view", "writes", "to", "a", "csv", "table", "the", "birthdate", "breeding", "cage", "and", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L272-L287
davebridges/mousedb
mousedb/data/views.py
data_csv
def data_csv(request, measurement_list): """This view generates a csv output of all data for a strain. For this function to work, you have to provide the filtered set of measurements.""" response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=data.csv' writer = csv.writer(response) writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"]) for measurement in measurement_list: writer.writerow([ measurement.animal, measurement.animal.Genotype, measurement.animal.Gender, measurement.assay, measurement.values.split(',')[0], measurement.animal.Strain, measurement.animal.Background, measurement.age(), measurement.animal.Cage, measurement.experiment.feeding_state, measurement.animal.treatment_set.all(), ]) return response
python
def data_csv(request, measurement_list): """This view generates a csv output of all data for a strain. For this function to work, you have to provide the filtered set of measurements.""" response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=data.csv' writer = csv.writer(response) writer.writerow(["Animal", "Genotype", "Gender","Assay", "Value","Strain", "Background","Age", "Cage", "Feeding", "Treatment"]) for measurement in measurement_list: writer.writerow([ measurement.animal, measurement.animal.Genotype, measurement.animal.Gender, measurement.assay, measurement.values.split(',')[0], measurement.animal.Strain, measurement.animal.Background, measurement.age(), measurement.animal.Cage, measurement.experiment.feeding_state, measurement.animal.treatment_set.all(), ]) return response
[ "def", "data_csv", "(", "request", ",", "measurement_list", ")", ":", "response", "=", "HttpResponse", "(", "content_type", "=", "'text/csv'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=data.csv'", "writer", "=", "csv", ".", "w...
This view generates a csv output of all data for a strain. For this function to work, you have to provide the filtered set of measurements.
[ "This", "view", "generates", "a", "csv", "output", "of", "all", "data", "for", "a", "strain", ".", "For", "this", "function", "to", "work", "you", "have", "to", "provide", "the", "filtered", "set", "of", "measurements", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L289-L312
davebridges/mousedb
mousedb/data/views.py
CohortData.get_queryset
def get_queryset(self): '''The queryset is filtered by measurements of animals which are part of that strain.''' cohort = get_object_or_404(Cohort, slug=self.kwargs['slug']) animals = cohort.animals.all() return Measurement.objects.filter(animal=animals)
python
def get_queryset(self): '''The queryset is filtered by measurements of animals which are part of that strain.''' cohort = get_object_or_404(Cohort, slug=self.kwargs['slug']) animals = cohort.animals.all() return Measurement.objects.filter(animal=animals)
[ "def", "get_queryset", "(", "self", ")", ":", "cohort", "=", "get_object_or_404", "(", "Cohort", ",", "slug", "=", "self", ".", "kwargs", "[", "'slug'", "]", ")", "animals", "=", "cohort", ".", "animals", ".", "all", "(", ")", "return", "Measurement", ...
The queryset is filtered by measurements of animals which are part of that strain.
[ "The", "queryset", "is", "filtered", "by", "measurements", "of", "animals", "which", "are", "part", "of", "that", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L83-L87
davebridges/mousedb
mousedb/data/views.py
CohortDataCSV.get
def get(self, request, *args, **kwargs): '''The queryset is filtered by measurements of animals which are part of that strain.''' cohort = get_object_or_404(Cohort, slug=self.kwargs['slug']) animals = cohort.animals.all() measurements = Measurement.objects.filter(animal=animals) return data_csv(self.request, measurements)
python
def get(self, request, *args, **kwargs): '''The queryset is filtered by measurements of animals which are part of that strain.''' cohort = get_object_or_404(Cohort, slug=self.kwargs['slug']) animals = cohort.animals.all() measurements = Measurement.objects.filter(animal=animals) return data_csv(self.request, measurements)
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cohort", "=", "get_object_or_404", "(", "Cohort", ",", "slug", "=", "self", ".", "kwargs", "[", "'slug'", "]", ")", "animals", "=", "cohort", ".", "ani...
The queryset is filtered by measurements of animals which are part of that strain.
[ "The", "queryset", "is", "filtered", "by", "measurements", "of", "animals", "which", "are", "part", "of", "that", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L94-L99
davebridges/mousedb
mousedb/data/views.py
MeasurementListCSV.get
def get(self, request, *args, **kwargs): '''The queryset returns all measurement objects''' measurements = Measurement.objects.all() return data_csv(self.request, measurements)
python
def get(self, request, *args, **kwargs): '''The queryset returns all measurement objects''' measurements = Measurement.objects.all() return data_csv(self.request, measurements)
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "measurements", "=", "Measurement", ".", "objects", ".", "all", "(", ")", "return", "data_csv", "(", "self", ".", "request", ",", "measurements", ")" ]
The queryset returns all measurement objects
[ "The", "queryset", "returns", "all", "measurement", "objects" ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L390-L393
davebridges/mousedb
mousedb/data/views.py
StrainData.get_queryset
def get_queryset(self): '''The queryset is filtered by measurements of animals which are part of that strain.''' strain = get_object_or_404(Strain, Strain_slug=self.kwargs['strain_slug']) animals = Animal.objects.filter(Strain=strain) return Measurement.objects.filter(animal=animals)
python
def get_queryset(self): '''The queryset is filtered by measurements of animals which are part of that strain.''' strain = get_object_or_404(Strain, Strain_slug=self.kwargs['strain_slug']) animals = Animal.objects.filter(Strain=strain) return Measurement.objects.filter(animal=animals)
[ "def", "get_queryset", "(", "self", ")", ":", "strain", "=", "get_object_or_404", "(", "Strain", ",", "Strain_slug", "=", "self", ".", "kwargs", "[", "'strain_slug'", "]", ")", "animals", "=", "Animal", ".", "objects", ".", "filter", "(", "Strain", "=", ...
The queryset is filtered by measurements of animals which are part of that strain.
[ "The", "queryset", "is", "filtered", "by", "measurements", "of", "animals", "which", "are", "part", "of", "that", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L579-L583
davebridges/mousedb
mousedb/data/views.py
StrainDataCSV.get
def get(self, request, *args, **kwargs): '''The queryset is filtered by measurements of animals which are part of that strain.''' strain = get_object_or_404(Strain, Strain_slug=self.kwargs['strain_slug']) animals = Animal.objects.filter(Strain=strain) measurements = Measurement.objects.filter(animal=animals) return data_csv(self.request, measurements)
python
def get(self, request, *args, **kwargs): '''The queryset is filtered by measurements of animals which are part of that strain.''' strain = get_object_or_404(Strain, Strain_slug=self.kwargs['strain_slug']) animals = Animal.objects.filter(Strain=strain) measurements = Measurement.objects.filter(animal=animals) return data_csv(self.request, measurements)
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "strain", "=", "get_object_or_404", "(", "Strain", ",", "Strain_slug", "=", "self", ".", "kwargs", "[", "'strain_slug'", "]", ")", "animals", "=", "Animal",...
The queryset is filtered by measurements of animals which are part of that strain.
[ "The", "queryset", "is", "filtered", "by", "measurements", "of", "animals", "which", "are", "part", "of", "that", "strain", "." ]
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/data/views.py#L590-L595
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer.vectorize
def vectorize( self, docs ): ''' Returns the feature vectors for a set of docs. If model is not already be trained, then self.train() is called. Args: docs (dict or list of tuples): asset_id, body_text of documents you wish to featurize. ''' if type(docs) == dict: docs = docs.items() if self.model == None: self.train(docs) asset_id2vector = {} unfound = [] for item in docs: ## iterate through the items in docs and check if any are already in the model. asset_id, _ = item label = 'DOC_' + str(asset_id) if label in self.model: asset_id2vector.update({asset_id: self.model['DOC_' + str(asset_id)]}) else: unfound.append(item) if len(unfound) > 0: ## for all assets not in the model, update the model and then get their sentence vectors. sentences = [self._gen_sentence(item) for item in unfound] self.update_model(sentences, train=self.stream_train) asset_id2vector.update({item[0]: self.model['DOC_' + str(item[0])] for item in unfound}) return asset_id2vector
python
def vectorize( self, docs ): ''' Returns the feature vectors for a set of docs. If model is not already be trained, then self.train() is called. Args: docs (dict or list of tuples): asset_id, body_text of documents you wish to featurize. ''' if type(docs) == dict: docs = docs.items() if self.model == None: self.train(docs) asset_id2vector = {} unfound = [] for item in docs: ## iterate through the items in docs and check if any are already in the model. asset_id, _ = item label = 'DOC_' + str(asset_id) if label in self.model: asset_id2vector.update({asset_id: self.model['DOC_' + str(asset_id)]}) else: unfound.append(item) if len(unfound) > 0: ## for all assets not in the model, update the model and then get their sentence vectors. sentences = [self._gen_sentence(item) for item in unfound] self.update_model(sentences, train=self.stream_train) asset_id2vector.update({item[0]: self.model['DOC_' + str(item[0])] for item in unfound}) return asset_id2vector
[ "def", "vectorize", "(", "self", ",", "docs", ")", ":", "if", "type", "(", "docs", ")", "==", "dict", ":", "docs", "=", "docs", ".", "items", "(", ")", "if", "self", ".", "model", "==", "None", ":", "self", ".", "train", "(", "docs", ")", "asse...
Returns the feature vectors for a set of docs. If model is not already be trained, then self.train() is called. Args: docs (dict or list of tuples): asset_id, body_text of documents you wish to featurize.
[ "Returns", "the", "feature", "vectors", "for", "a", "set", "of", "docs", ".", "If", "model", "is", "not", "already", "be", "trained", "then", "self", ".", "train", "()", "is", "called", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L64-L98
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer.train
def train(self, docs, retrain=False): ''' Train Doc2Vec on a series of docs. Train from scratch or update. Args: docs: list of tuples (assetid, body_text) or dictionary {assetid : body_text} retrain: boolean, retrain from scratch or update model saves model in class to self.model Returns: 0 if successful ''' if type(docs) == dict: docs = docs.items() train_sentences = [self._gen_sentence(item) for item in docs] if (self.is_trained) and (retrain == False): ## online training self.update_model(train_sentences, update_labels_bool=True) else: ## train from scratch self.model = Doc2Vec(train_sentences, size=self.size, window=self.window, min_count=self.min_count, workers=self.workers) self.is_trained = True return 0
python
def train(self, docs, retrain=False): ''' Train Doc2Vec on a series of docs. Train from scratch or update. Args: docs: list of tuples (assetid, body_text) or dictionary {assetid : body_text} retrain: boolean, retrain from scratch or update model saves model in class to self.model Returns: 0 if successful ''' if type(docs) == dict: docs = docs.items() train_sentences = [self._gen_sentence(item) for item in docs] if (self.is_trained) and (retrain == False): ## online training self.update_model(train_sentences, update_labels_bool=True) else: ## train from scratch self.model = Doc2Vec(train_sentences, size=self.size, window=self.window, min_count=self.min_count, workers=self.workers) self.is_trained = True return 0
[ "def", "train", "(", "self", ",", "docs", ",", "retrain", "=", "False", ")", ":", "if", "type", "(", "docs", ")", "==", "dict", ":", "docs", "=", "docs", ".", "items", "(", ")", "train_sentences", "=", "[", "self", ".", "_gen_sentence", "(", "item"...
Train Doc2Vec on a series of docs. Train from scratch or update. Args: docs: list of tuples (assetid, body_text) or dictionary {assetid : body_text} retrain: boolean, retrain from scratch or update model saves model in class to self.model Returns: 0 if successful
[ "Train", "Doc2Vec", "on", "a", "series", "of", "docs", ".", "Train", "from", "scratch", "or", "update", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L101-L127
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer.update_model
def update_model(self, sentences, update_labels_bool): ''' takes a list of sentenes and updates an existing model. Vectors will be callable through self.model[label] update_labels_bool: boolean that says whether to train the model (self.model.train_words = True) or simply to get vectors for the documents (self.model.train_words = False) self.vectorize should not train the model further self.train should if model already exists ''' n_sentences = self._add_new_labels(sentences) # add new rows to self.model.syn0 n = self.model.syn0.shape[0] self.model.syn0 = np.vstack(( self.model.syn0, np.empty((n_sentences, self.model.layer1_size), dtype=np.float32) )) for i in xrange(n, n + n_sentences): np.random.seed( np.uint32(self.model.hashfxn(self.model.index2word[i] + str(self.model.seed)))) a = (np.random.rand(self.model.layer1_size) - 0.5) / self.model.layer1_size self.model.syn0[i] = a # Set self.model.train_words to False and self.model.train_labels to True self.model.train_words = update_labels_bool self.model.train_lbls = True # train self.model.train(sentences) return
python
def update_model(self, sentences, update_labels_bool): ''' takes a list of sentenes and updates an existing model. Vectors will be callable through self.model[label] update_labels_bool: boolean that says whether to train the model (self.model.train_words = True) or simply to get vectors for the documents (self.model.train_words = False) self.vectorize should not train the model further self.train should if model already exists ''' n_sentences = self._add_new_labels(sentences) # add new rows to self.model.syn0 n = self.model.syn0.shape[0] self.model.syn0 = np.vstack(( self.model.syn0, np.empty((n_sentences, self.model.layer1_size), dtype=np.float32) )) for i in xrange(n, n + n_sentences): np.random.seed( np.uint32(self.model.hashfxn(self.model.index2word[i] + str(self.model.seed)))) a = (np.random.rand(self.model.layer1_size) - 0.5) / self.model.layer1_size self.model.syn0[i] = a # Set self.model.train_words to False and self.model.train_labels to True self.model.train_words = update_labels_bool self.model.train_lbls = True # train self.model.train(sentences) return
[ "def", "update_model", "(", "self", ",", "sentences", ",", "update_labels_bool", ")", ":", "n_sentences", "=", "self", ".", "_add_new_labels", "(", "sentences", ")", "# add new rows to self.model.syn0", "n", "=", "self", ".", "model", ".", "syn0", ".", "shape", ...
takes a list of sentenes and updates an existing model. Vectors will be callable through self.model[label] update_labels_bool: boolean that says whether to train the model (self.model.train_words = True) or simply to get vectors for the documents (self.model.train_words = False) self.vectorize should not train the model further self.train should if model already exists
[ "takes", "a", "list", "of", "sentenes", "and", "updates", "an", "existing", "model", ".", "Vectors", "will", "be", "callable", "through", "self", ".", "model", "[", "label", "]" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L140-L174
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer._process
def _process(self, input): ''' Takes in html-mixed body text as a string and returns a list of strings, lower case and with punctuation given spacing. Called by self._gen_sentence() Args: inpnut (string): body text ''' input = re.sub("<[^>]*>", " ", input) punct = list(string.punctuation) for symbol in punct: input = input.replace(symbol, " %s " % symbol) input = filter(lambda x: x != u'', input.lower().split(' ')) return input
python
def _process(self, input): ''' Takes in html-mixed body text as a string and returns a list of strings, lower case and with punctuation given spacing. Called by self._gen_sentence() Args: inpnut (string): body text ''' input = re.sub("<[^>]*>", " ", input) punct = list(string.punctuation) for symbol in punct: input = input.replace(symbol, " %s " % symbol) input = filter(lambda x: x != u'', input.lower().split(' ')) return input
[ "def", "_process", "(", "self", ",", "input", ")", ":", "input", "=", "re", ".", "sub", "(", "\"<[^>]*>\"", ",", "\" \"", ",", "input", ")", "punct", "=", "list", "(", "string", ".", "punctuation", ")", "for", "symbol", "in", "punct", ":", "input", ...
Takes in html-mixed body text as a string and returns a list of strings, lower case and with punctuation given spacing. Called by self._gen_sentence() Args: inpnut (string): body text
[ "Takes", "in", "html", "-", "mixed", "body", "text", "as", "a", "string", "and", "returns", "a", "list", "of", "strings", "lower", "case", "and", "with", "punctuation", "given", "spacing", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L177-L193
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer._gen_sentence
def _gen_sentence(self, assetid_body_tuple): ''' Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence Args: assetid_body_tuple (tuple): (assetid, bodytext) pair ''' asset_id, body = assetid_body_tuple text = self._process(body) sentence = LabeledSentence(text, labels=['DOC_%s' % str(asset_id)]) return sentence
python
def _gen_sentence(self, assetid_body_tuple): ''' Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence Args: assetid_body_tuple (tuple): (assetid, bodytext) pair ''' asset_id, body = assetid_body_tuple text = self._process(body) sentence = LabeledSentence(text, labels=['DOC_%s' % str(asset_id)]) return sentence
[ "def", "_gen_sentence", "(", "self", ",", "assetid_body_tuple", ")", ":", "asset_id", ",", "body", "=", "assetid_body_tuple", "text", "=", "self", ".", "_process", "(", "body", ")", "sentence", "=", "LabeledSentence", "(", "text", ",", "labels", "=", "[", ...
Takes an assetid_body_tuple and returns a Doc2Vec LabeledSentence Args: assetid_body_tuple (tuple): (assetid, bodytext) pair
[ "Takes", "an", "assetid_body_tuple", "and", "returns", "a", "Doc2Vec", "LabeledSentence" ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L196-L206
frnsys/broca
broca/vectorize/doc2vec.py
Doc2VecVectorizer._add_new_labels
def _add_new_labels(self, sentences): ''' Adds new sentences to the internal indexing of the model. Args: sentences (list): LabeledSentences for each doc to be added Returns: int: number of sentences added to the model ''' sentence_no = -1 total_words = 0 vocab = self.model.vocab model_sentence_n = len([l for l in vocab if l.startswith("DOC_")]) n_sentences = 0 for sentence_no, sentence in enumerate(sentences): sentence_length = len(sentence.words) for label in sentence.labels: total_words += 1 if label in vocab: vocab[label].count += sentence_length else: vocab[label] = gensim.models.word2vec.Vocab( count=sentence_length) vocab[label].index = len(self.model.vocab) - 1 vocab[label].code = [0] vocab[label].sample_probability = 1. self.model.index2word.append(label) n_sentences += 1 return n_sentences
python
def _add_new_labels(self, sentences): ''' Adds new sentences to the internal indexing of the model. Args: sentences (list): LabeledSentences for each doc to be added Returns: int: number of sentences added to the model ''' sentence_no = -1 total_words = 0 vocab = self.model.vocab model_sentence_n = len([l for l in vocab if l.startswith("DOC_")]) n_sentences = 0 for sentence_no, sentence in enumerate(sentences): sentence_length = len(sentence.words) for label in sentence.labels: total_words += 1 if label in vocab: vocab[label].count += sentence_length else: vocab[label] = gensim.models.word2vec.Vocab( count=sentence_length) vocab[label].index = len(self.model.vocab) - 1 vocab[label].code = [0] vocab[label].sample_probability = 1. self.model.index2word.append(label) n_sentences += 1 return n_sentences
[ "def", "_add_new_labels", "(", "self", ",", "sentences", ")", ":", "sentence_no", "=", "-", "1", "total_words", "=", "0", "vocab", "=", "self", ".", "model", ".", "vocab", "model_sentence_n", "=", "len", "(", "[", "l", "for", "l", "in", "vocab", "if", ...
Adds new sentences to the internal indexing of the model. Args: sentences (list): LabeledSentences for each doc to be added Returns: int: number of sentences added to the model
[ "Adds", "new", "sentences", "to", "the", "internal", "indexing", "of", "the", "model", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/vectorize/doc2vec.py#L209-L241
elifesciences/proofreader-python
proofreader/runner.py
run
def run(targets, config_dir='.', check_licenses=False): # type: (List[str], str, bool) -> None """Runs `pylint` and `flake8` commands and exits based off the evaluation of both command results. :param targets: List[str] :param config_dir: str :param check_licenses: bool :return: """ pylint_return_state = False flake8_return_state = False if check_licenses: run_license_checker(config_path=get_license_checker_config_path(config_dir)) pylint_options = get_pylint_options(config_dir=config_dir) flake8_options = get_flake8_options(config_dir=config_dir) if targets: pylint_return_state = _run_command(command='pylint', targets=targets, options=pylint_options) flake8_return_state = _run_command(command='flake8', targets=targets, options=flake8_options) if not flake8_return_state and not pylint_return_state: sys.exit(0) else: sys.exit(1)
python
def run(targets, config_dir='.', check_licenses=False): # type: (List[str], str, bool) -> None """Runs `pylint` and `flake8` commands and exits based off the evaluation of both command results. :param targets: List[str] :param config_dir: str :param check_licenses: bool :return: """ pylint_return_state = False flake8_return_state = False if check_licenses: run_license_checker(config_path=get_license_checker_config_path(config_dir)) pylint_options = get_pylint_options(config_dir=config_dir) flake8_options = get_flake8_options(config_dir=config_dir) if targets: pylint_return_state = _run_command(command='pylint', targets=targets, options=pylint_options) flake8_return_state = _run_command(command='flake8', targets=targets, options=flake8_options) if not flake8_return_state and not pylint_return_state: sys.exit(0) else: sys.exit(1)
[ "def", "run", "(", "targets", ",", "config_dir", "=", "'.'", ",", "check_licenses", "=", "False", ")", ":", "# type: (List[str], str, bool) -> None", "pylint_return_state", "=", "False", "flake8_return_state", "=", "False", "if", "check_licenses", ":", "run_license_ch...
Runs `pylint` and `flake8` commands and exits based off the evaluation of both command results. :param targets: List[str] :param config_dir: str :param check_licenses: bool :return:
[ "Runs", "pylint", "and", "flake8", "commands", "and", "exits", "based", "off", "the", "evaluation", "of", "both", "command", "results", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/runner.py#L24-L52
elifesciences/proofreader-python
proofreader/runner.py
_run_command
def _run_command(command, targets, options): # type: (str, List[str], List[str]) -> bool """Runs `command` + `targets` + `options` in a subprocess and returns a boolean determined by the process return code. >>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E']) >>> result True :param command: str :param targets: List[str] :param options: List[str] :return: bool """ print('{0}: targets={1} options={2}'.format(command, targets, options)) cmd = [command] + targets + options process = Popen(cmd) process.wait() return bool(process.returncode)
python
def _run_command(command, targets, options): # type: (str, List[str], List[str]) -> bool """Runs `command` + `targets` + `options` in a subprocess and returns a boolean determined by the process return code. >>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E']) >>> result True :param command: str :param targets: List[str] :param options: List[str] :return: bool """ print('{0}: targets={1} options={2}'.format(command, targets, options)) cmd = [command] + targets + options process = Popen(cmd) process.wait() return bool(process.returncode)
[ "def", "_run_command", "(", "command", ",", "targets", ",", "options", ")", ":", "# type: (str, List[str], List[str]) -> bool", "print", "(", "'{0}: targets={1} options={2}'", ".", "format", "(", "command", ",", "targets", ",", "options", ")", ")", "cmd", "=", "["...
Runs `command` + `targets` + `options` in a subprocess and returns a boolean determined by the process return code. >>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E']) >>> result True :param command: str :param targets: List[str] :param options: List[str] :return: bool
[ "Runs", "command", "+", "targets", "+", "options", "in", "a", "subprocess", "and", "returns", "a", "boolean", "determined", "by", "the", "process", "return", "code", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/runner.py#L55-L75
rdireen/spherepy
spherepy/pysphi.py
ynnm
def ynnm(n, m): """Initial value for recursion formula""" a = 1.0 / np.sqrt(4.0 * np.pi) pm = np.abs(m) out = 0.0 if(n < pm): out = 0.0 elif(n == 0): out = a else: out = a for k in xrange(1, n + 1): out *= np.sqrt((2.0 * k + 1.0) / 8.0 / k) if(n != pm): for k in xrange(n - 1, pm - 1, -1): out *= np.sqrt((n + k + 1.0) / (n - k)) return out
python
def ynnm(n, m): """Initial value for recursion formula""" a = 1.0 / np.sqrt(4.0 * np.pi) pm = np.abs(m) out = 0.0 if(n < pm): out = 0.0 elif(n == 0): out = a else: out = a for k in xrange(1, n + 1): out *= np.sqrt((2.0 * k + 1.0) / 8.0 / k) if(n != pm): for k in xrange(n - 1, pm - 1, -1): out *= np.sqrt((n + k + 1.0) / (n - k)) return out
[ "def", "ynnm", "(", "n", ",", "m", ")", ":", "a", "=", "1.0", "/", "np", ".", "sqrt", "(", "4.0", "*", "np", ".", "pi", ")", "pm", "=", "np", ".", "abs", "(", "m", ")", "out", "=", "0.0", "if", "(", "n", "<", "pm", ")", ":", "out", "=...
Initial value for recursion formula
[ "Initial", "value", "for", "recursion", "formula" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L54-L73
rdireen/spherepy
spherepy/pysphi.py
ynunm
def ynunm(n, m, L): """Fourier coefficients for spherical harmonics""" out = np.zeros(L, dtype=np.float64) tmp1 = 0 tmp2 = 0 tmp3 = 0 tmp4 = 0 if(np.abs(m) <= n): out[n] = ynnm(n, m) k = n - 2 if(k >= 0): tmp1 = (n - k - 1.0) * (n + k + 2.0) tmp2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2 tmp4 = ((n - k) * (n + k + 1.0)) out[k] = (tmp1 + tmp2) * out[k + 2] / tmp4 for k in xrange(n - 4, -1, -2): tmp1 = (n - k - 1.0) * (n + k + 2.0) tmp2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2 tmp3 = (n - k - 3.0) * (n + k + 4.0); tmp4 = ((n - k) * (n + k + 1.0)) out[k] = ((tmp1 + tmp2) * out[k + 2] - tmp3 * out[k + 4]) / tmp4 return out
python
def ynunm(n, m, L): """Fourier coefficients for spherical harmonics""" out = np.zeros(L, dtype=np.float64) tmp1 = 0 tmp2 = 0 tmp3 = 0 tmp4 = 0 if(np.abs(m) <= n): out[n] = ynnm(n, m) k = n - 2 if(k >= 0): tmp1 = (n - k - 1.0) * (n + k + 2.0) tmp2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2 tmp4 = ((n - k) * (n + k + 1.0)) out[k] = (tmp1 + tmp2) * out[k + 2] / tmp4 for k in xrange(n - 4, -1, -2): tmp1 = (n - k - 1.0) * (n + k + 2.0) tmp2 = (n - k - 2.0) * (n + k + 3.0) - 4.0 * m ** 2 tmp3 = (n - k - 3.0) * (n + k + 4.0); tmp4 = ((n - k) * (n + k + 1.0)) out[k] = ((tmp1 + tmp2) * out[k + 2] - tmp3 * out[k + 4]) / tmp4 return out
[ "def", "ynunm", "(", "n", ",", "m", ",", "L", ")", ":", "out", "=", "np", ".", "zeros", "(", "L", ",", "dtype", "=", "np", ".", "float64", ")", "tmp1", "=", "0", "tmp2", "=", "0", "tmp3", "=", "0", "tmp4", "=", "0", "if", "(", "np", ".", ...
Fourier coefficients for spherical harmonics
[ "Fourier", "coefficients", "for", "spherical", "harmonics" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L75-L98
rdireen/spherepy
spherepy/pysphi.py
smallest_prime_factor
def smallest_prime_factor(Q): """Find the smallest number factorable by the small primes 2, 3, 4, and 7 that is larger than the argument Q""" A = Q; while(A != 1): if(np.mod(A, 2) == 0): A = A / 2 elif(np.mod(A, 3) == 0): A = A / 3 elif(np.mod(A, 5) == 0): A = A / 5 elif(np.mod(A, 7) == 0): A = A / 7; else: A = Q + 1; Q = A; return Q
python
def smallest_prime_factor(Q): """Find the smallest number factorable by the small primes 2, 3, 4, and 7 that is larger than the argument Q""" A = Q; while(A != 1): if(np.mod(A, 2) == 0): A = A / 2 elif(np.mod(A, 3) == 0): A = A / 3 elif(np.mod(A, 5) == 0): A = A / 5 elif(np.mod(A, 7) == 0): A = A / 7; else: A = Q + 1; Q = A; return Q
[ "def", "smallest_prime_factor", "(", "Q", ")", ":", "A", "=", "Q", "while", "(", "A", "!=", "1", ")", ":", "if", "(", "np", ".", "mod", "(", "A", ",", "2", ")", "==", "0", ")", ":", "A", "=", "A", "/", "2", "elif", "(", "np", ".", "mod", ...
Find the smallest number factorable by the small primes 2, 3, 4, and 7 that is larger than the argument Q
[ "Find", "the", "smallest", "number", "factorable", "by", "the", "small", "primes", "2", "3", "4", "and", "7", "that", "is", "larger", "than", "the", "argument", "Q" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L101-L119
rdireen/spherepy
spherepy/pysphi.py
s_data
def s_data(nrows_fdata, Nmax, Q): """ I am going to assume we will always have even data. This is pretty safe because it means that we have measured both poles of the sphere and have data that has been continued. nrows_fdata: Number of rows in fdata. Nmax: The largest number of n values desired. Q: A value greater than nrows_fdata + Nmax. This can be selected to be factorable into small primes to increase the speed of the fft (probably not that big of a deal today). """ if np.mod(nrows_fdata, 2) == 1: raise Exception("nrows_fdata must be even.") L1 = nrows_fdata s = np.zeros(Q, dtype=np.complex128) MM = int(L1 / 2) for nu in xrange(-MM, MM + Nmax + 1): if np.mod(nu, 2) == 1: s[nu - MM] = -1j / nu return s
python
def s_data(nrows_fdata, Nmax, Q): """ I am going to assume we will always have even data. This is pretty safe because it means that we have measured both poles of the sphere and have data that has been continued. nrows_fdata: Number of rows in fdata. Nmax: The largest number of n values desired. Q: A value greater than nrows_fdata + Nmax. This can be selected to be factorable into small primes to increase the speed of the fft (probably not that big of a deal today). """ if np.mod(nrows_fdata, 2) == 1: raise Exception("nrows_fdata must be even.") L1 = nrows_fdata s = np.zeros(Q, dtype=np.complex128) MM = int(L1 / 2) for nu in xrange(-MM, MM + Nmax + 1): if np.mod(nu, 2) == 1: s[nu - MM] = -1j / nu return s
[ "def", "s_data", "(", "nrows_fdata", ",", "Nmax", ",", "Q", ")", ":", "if", "np", ".", "mod", "(", "nrows_fdata", ",", "2", ")", "==", "1", ":", "raise", "Exception", "(", "\"nrows_fdata must be even.\"", ")", "L1", "=", "nrows_fdata", "s", "=", "np", ...
I am going to assume we will always have even data. This is pretty safe because it means that we have measured both poles of the sphere and have data that has been continued. nrows_fdata: Number of rows in fdata. Nmax: The largest number of n values desired. Q: A value greater than nrows_fdata + Nmax. This can be selected to be factorable into small primes to increase the speed of the fft (probably not that big of a deal today).
[ "I", "am", "going", "to", "assume", "we", "will", "always", "have", "even", "data", ".", "This", "is", "pretty", "safe", "because", "it", "means", "that", "we", "have", "measured", "both", "poles", "of", "the", "sphere", "and", "have", "data", "that", ...
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L121-L147
rdireen/spherepy
spherepy/pysphi.py
hkm_fc
def hkm_fc(fdata, Nmax, m, s): """ Assume fdata has even rows""" f = fdata[:, m] L1 = f.size MM = int(L1 / 2) Q = s.size ff = np.zeros(Q, dtype=np.complex128) for n in xrange(MM, L1): ff[n] = f[n - MM] for n in xrange(0, MM): ff[n] = f[n + MM] # For larger problems, this speeds things up pretty good. F = np.fft.fft(ff) S = np.fft.fft(s) out = 4 * np.pi * np.fft.ifft(F * S) return out[0:Nmax + 1]
python
def hkm_fc(fdata, Nmax, m, s): """ Assume fdata has even rows""" f = fdata[:, m] L1 = f.size MM = int(L1 / 2) Q = s.size ff = np.zeros(Q, dtype=np.complex128) for n in xrange(MM, L1): ff[n] = f[n - MM] for n in xrange(0, MM): ff[n] = f[n + MM] # For larger problems, this speeds things up pretty good. F = np.fft.fft(ff) S = np.fft.fft(s) out = 4 * np.pi * np.fft.ifft(F * S) return out[0:Nmax + 1]
[ "def", "hkm_fc", "(", "fdata", ",", "Nmax", ",", "m", ",", "s", ")", ":", "f", "=", "fdata", "[", ":", ",", "m", "]", "L1", "=", "f", ".", "size", "MM", "=", "int", "(", "L1", "/", "2", ")", "Q", "=", "s", ".", "size", "ff", "=", "np", ...
Assume fdata has even rows
[ "Assume", "fdata", "has", "even", "rows" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L149-L169
rdireen/spherepy
spherepy/pysphi.py
mindx
def mindx(m, nmax, mmax): """index to the first n value for a give m within the spherical coefficients vector. Used by sc_to_fc""" ind = 0 NN = nmax + 1 if np.abs(m) > mmax: raise Exception("|m| cannot be larger than mmax") if (m != 0): ind = NN ii = 1 for i in xrange(1, np.abs(m)): ind = ind + 2 * (NN - i) ii = i + 1 if m > 0: ind = ind + NN - ii return ind
python
def mindx(m, nmax, mmax): """index to the first n value for a give m within the spherical coefficients vector. Used by sc_to_fc""" ind = 0 NN = nmax + 1 if np.abs(m) > mmax: raise Exception("|m| cannot be larger than mmax") if (m != 0): ind = NN ii = 1 for i in xrange(1, np.abs(m)): ind = ind + 2 * (NN - i) ii = i + 1 if m > 0: ind = ind + NN - ii return ind
[ "def", "mindx", "(", "m", ",", "nmax", ",", "mmax", ")", ":", "ind", "=", "0", "NN", "=", "nmax", "+", "1", "if", "np", ".", "abs", "(", "m", ")", ">", "mmax", ":", "raise", "Exception", "(", "\"|m| cannot be larger than mmax\"", ")", "if", "(", ...
index to the first n value for a give m within the spherical coefficients vector. Used by sc_to_fc
[ "index", "to", "the", "first", "n", "value", "for", "a", "give", "m", "within", "the", "spherical", "coefficients", "vector", ".", "Used", "by", "sc_to_fc" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L205-L225
rdireen/spherepy
spherepy/pysphi.py
sc_to_fc
def sc_to_fc(spvec, nmax, mmax, nrows, ncols): """assume Ncols is even""" fdata = np.zeros([int(nrows), ncols], dtype=np.complex128) for k in xrange(0, int(ncols / 2)): if k < mmax: kk = k ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) kk = -(k + 1) ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) if k == mmax: kk = k ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) return fdata
python
def sc_to_fc(spvec, nmax, mmax, nrows, ncols): """assume Ncols is even""" fdata = np.zeros([int(nrows), ncols], dtype=np.complex128) for k in xrange(0, int(ncols / 2)): if k < mmax: kk = k ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) kk = -(k + 1) ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) if k == mmax: kk = k ind = mindx(kk, nmax, mmax) vec = spvec[ind:ind + nmax - np.abs(kk) + 1] fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows) return fdata
[ "def", "sc_to_fc", "(", "spvec", ",", "nmax", ",", "mmax", ",", "nrows", ",", "ncols", ")", ":", "fdata", "=", "np", ".", "zeros", "(", "[", "int", "(", "nrows", ")", ",", "ncols", "]", ",", "dtype", "=", "np", ".", "complex128", ")", "for", "k...
assume Ncols is even
[ "assume", "Ncols", "is", "even" ]
train
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/pysphi.py#L251-L274
ltalirz/aiida-gudhi
aiida_gudhi/calculations/rips.py
RipsDistanceMatrixCalculation._use_methods
def _use_methods(cls): """ Add use_* methods for calculations. Code below enables the usage my_calculation.use_parameters(my_parameters) """ use_dict = JobCalculation._use_methods use_dict.update({ "parameters": { 'valid_types': RipsDistanceMatrixParameters, 'additional_parameter': None, 'linkname': 'parameters', 'docstring': 'add command line parameters', }, "distance_matrix": { 'valid_types': SinglefileData, 'additional_parameter': None, 'linkname': 'distance_matrix', 'docstring': "distance matrix of point cloud", }, "remote_folder": { 'valid_types': RemoteData, 'additional_parameter': None, 'linkname': 'remote_folder', 'docstring': "remote folder containing distance matrix", }, }) return use_dict
python
def _use_methods(cls): """ Add use_* methods for calculations. Code below enables the usage my_calculation.use_parameters(my_parameters) """ use_dict = JobCalculation._use_methods use_dict.update({ "parameters": { 'valid_types': RipsDistanceMatrixParameters, 'additional_parameter': None, 'linkname': 'parameters', 'docstring': 'add command line parameters', }, "distance_matrix": { 'valid_types': SinglefileData, 'additional_parameter': None, 'linkname': 'distance_matrix', 'docstring': "distance matrix of point cloud", }, "remote_folder": { 'valid_types': RemoteData, 'additional_parameter': None, 'linkname': 'remote_folder', 'docstring': "remote folder containing distance matrix", }, }) return use_dict
[ "def", "_use_methods", "(", "cls", ")", ":", "use_dict", "=", "JobCalculation", ".", "_use_methods", "use_dict", ".", "update", "(", "{", "\"parameters\"", ":", "{", "'valid_types'", ":", "RipsDistanceMatrixParameters", ",", "'additional_parameter'", ":", "None", ...
Add use_* methods for calculations. Code below enables the usage my_calculation.use_parameters(my_parameters)
[ "Add", "use_", "*", "methods", "for", "calculations", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/calculations/rips.py#L36-L64
ltalirz/aiida-gudhi
aiida_gudhi/calculations/rips.py
RipsDistanceMatrixCalculation._validate_inputs
def _validate_inputs(self, inputdict): """ Validate input links. """ # Check inputdict try: parameters = inputdict.pop(self.get_linkname('parameters')) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, RipsDistanceMatrixParameters): raise InputValidationError("parameters not of type " "RipsDistanceMatrixParameters") # Check code try: code = inputdict.pop(self.get_linkname('code')) except KeyError: raise InputValidationError("No code specified for this " "calculation") # Check input files try: distance_matrix = inputdict.pop( self.get_linkname('distance_matrix')) if not isinstance(distance_matrix, SinglefileData): raise InputValidationError( "distance_matrix not of type SinglefileData") symlink = None except KeyError: distance_matrix = None try: remote_folder = inputdict.pop( self.get_linkname('remote_folder')) if not isinstance(remote_folder, RemoteData): raise InputValidationError( "remote_folder is not of type RemoteData") comp_uuid = remote_folder.get_computer().uuid remote_path = remote_folder.get_remote_path() symlink = (comp_uuid, remote_path, self._REMOTE_FOLDER_LINK) except KeyError: raise InputValidationError( "Need to provide either distance_matrix or remote_folder") # Check that nothing is left unparsed if inputdict: raise ValidationError("Unrecognized inputs: {}".format(inputdict)) return parameters, code, distance_matrix, symlink
python
def _validate_inputs(self, inputdict): """ Validate input links. """ # Check inputdict try: parameters = inputdict.pop(self.get_linkname('parameters')) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, RipsDistanceMatrixParameters): raise InputValidationError("parameters not of type " "RipsDistanceMatrixParameters") # Check code try: code = inputdict.pop(self.get_linkname('code')) except KeyError: raise InputValidationError("No code specified for this " "calculation") # Check input files try: distance_matrix = inputdict.pop( self.get_linkname('distance_matrix')) if not isinstance(distance_matrix, SinglefileData): raise InputValidationError( "distance_matrix not of type SinglefileData") symlink = None except KeyError: distance_matrix = None try: remote_folder = inputdict.pop( self.get_linkname('remote_folder')) if not isinstance(remote_folder, RemoteData): raise InputValidationError( "remote_folder is not of type RemoteData") comp_uuid = remote_folder.get_computer().uuid remote_path = remote_folder.get_remote_path() symlink = (comp_uuid, remote_path, self._REMOTE_FOLDER_LINK) except KeyError: raise InputValidationError( "Need to provide either distance_matrix or remote_folder") # Check that nothing is left unparsed if inputdict: raise ValidationError("Unrecognized inputs: {}".format(inputdict)) return parameters, code, distance_matrix, symlink
[ "def", "_validate_inputs", "(", "self", ",", "inputdict", ")", ":", "# Check inputdict", "try", ":", "parameters", "=", "inputdict", ".", "pop", "(", "self", ".", "get_linkname", "(", "'parameters'", ")", ")", "except", "KeyError", ":", "raise", "InputValidati...
Validate input links.
[ "Validate", "input", "links", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/calculations/rips.py#L66-L116
ltalirz/aiida-gudhi
aiida_gudhi/calculations/rips.py
RipsDistanceMatrixCalculation._prepare_for_submission
def _prepare_for_submission(self, tempfolder, inputdict): """ Create input files. :param tempfolder: aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: dictionary of the input nodes as they would be returned by get_inputs_dict """ parameters, code, distance_matrix, symlink = \ self._validate_inputs(inputdict) # Prepare CalcInfo to be returned to aiida calcinfo = CalcInfo() calcinfo.uuid = self.uuid calcinfo.remote_copy_list = [] calcinfo.retrieve_list = parameters.output_files codeinfo = CodeInfo() codeinfo.code_uuid = code.uuid if distance_matrix is not None: calcinfo.local_copy_list = [ [ distance_matrix.get_file_abs_path(), distance_matrix.filename ], ] codeinfo.cmdline_params = parameters.cmdline_params( distance_matrix_file_name=distance_matrix.filename) else: calcinfo.remote_symlink_list = [symlink] codeinfo.cmdline_params = parameters.cmdline_params( remote_folder_path=self._REMOTE_FOLDER_LINK) calcinfo.codes_info = [codeinfo] return calcinfo
python
def _prepare_for_submission(self, tempfolder, inputdict): """ Create input files. :param tempfolder: aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: dictionary of the input nodes as they would be returned by get_inputs_dict """ parameters, code, distance_matrix, symlink = \ self._validate_inputs(inputdict) # Prepare CalcInfo to be returned to aiida calcinfo = CalcInfo() calcinfo.uuid = self.uuid calcinfo.remote_copy_list = [] calcinfo.retrieve_list = parameters.output_files codeinfo = CodeInfo() codeinfo.code_uuid = code.uuid if distance_matrix is not None: calcinfo.local_copy_list = [ [ distance_matrix.get_file_abs_path(), distance_matrix.filename ], ] codeinfo.cmdline_params = parameters.cmdline_params( distance_matrix_file_name=distance_matrix.filename) else: calcinfo.remote_symlink_list = [symlink] codeinfo.cmdline_params = parameters.cmdline_params( remote_folder_path=self._REMOTE_FOLDER_LINK) calcinfo.codes_info = [codeinfo] return calcinfo
[ "def", "_prepare_for_submission", "(", "self", ",", "tempfolder", ",", "inputdict", ")", ":", "parameters", ",", "code", ",", "distance_matrix", ",", "symlink", "=", "self", ".", "_validate_inputs", "(", "inputdict", ")", "# Prepare CalcInfo to be returned to aiida", ...
Create input files. :param tempfolder: aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: dictionary of the input nodes as they would be returned by get_inputs_dict
[ "Create", "input", "files", "." ]
train
https://github.com/ltalirz/aiida-gudhi/blob/81ebec782ddff3ab97a3e3242b809fec989fa4b9/aiida_gudhi/calculations/rips.py#L118-L155
darkfeline/animanager
animanager/db/query/status.py
cache_status
def cache_status(db, aid, force=False): """Calculate and cache status for given anime. Don't do anything if status already exists and force is False. """ with db: cur = db.cursor() if not force: # We don't do anything if we already have this aid in our # cache. cur.execute('SELECT 1 FROM cache_anime WHERE aid=?', (aid,)) if cur.fetchone() is not None: return # Retrieve information for determining complete. cur.execute( 'SELECT episodecount, enddate FROM anime WHERE aid=?', (aid,)) row = cur.fetchone() if row is None: raise ValueError('aid provided does not exist') episodecount, enddate = row # Select all regular episodes in ascending order. cur.execute(""" SELECT number, user_watched FROM episode WHERE aid=? AND type=? ORDER BY number ASC """, (aid, get_eptype(db, 'regular').id)) # We find the last consecutive episode that is user_watched. number = 0 for number, watched in cur: # Once we find the first unwatched episode, we set the last # consecutive watched episode to the previous episode (or 0). if watched == 0: number -= 1 break # We store this in the cache. set_status(db, aid, enddate and episodecount <= number, number)
python
def cache_status(db, aid, force=False): """Calculate and cache status for given anime. Don't do anything if status already exists and force is False. """ with db: cur = db.cursor() if not force: # We don't do anything if we already have this aid in our # cache. cur.execute('SELECT 1 FROM cache_anime WHERE aid=?', (aid,)) if cur.fetchone() is not None: return # Retrieve information for determining complete. cur.execute( 'SELECT episodecount, enddate FROM anime WHERE aid=?', (aid,)) row = cur.fetchone() if row is None: raise ValueError('aid provided does not exist') episodecount, enddate = row # Select all regular episodes in ascending order. cur.execute(""" SELECT number, user_watched FROM episode WHERE aid=? AND type=? ORDER BY number ASC """, (aid, get_eptype(db, 'regular').id)) # We find the last consecutive episode that is user_watched. number = 0 for number, watched in cur: # Once we find the first unwatched episode, we set the last # consecutive watched episode to the previous episode (or 0). if watched == 0: number -= 1 break # We store this in the cache. set_status(db, aid, enddate and episodecount <= number, number)
[ "def", "cache_status", "(", "db", ",", "aid", ",", "force", "=", "False", ")", ":", "with", "db", ":", "cur", "=", "db", ".", "cursor", "(", ")", "if", "not", "force", ":", "# We don't do anything if we already have this aid in our", "# cache.", "cur", ".", ...
Calculate and cache status for given anime. Don't do anything if status already exists and force is False.
[ "Calculate", "and", "cache", "status", "for", "given", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/status.py#L32-L71
darkfeline/animanager
animanager/db/query/status.py
set_status
def set_status( db, aid: int, complete: Any, watched_episodes: int, ) -> None: """Set anime status.""" upsert(db, 'cache_anime', ['aid'], { 'aid': aid, 'complete': 1 if complete else 0, 'watched_episodes': watched_episodes, })
python
def set_status( db, aid: int, complete: Any, watched_episodes: int, ) -> None: """Set anime status.""" upsert(db, 'cache_anime', ['aid'], { 'aid': aid, 'complete': 1 if complete else 0, 'watched_episodes': watched_episodes, })
[ "def", "set_status", "(", "db", ",", "aid", ":", "int", ",", "complete", ":", "Any", ",", "watched_episodes", ":", "int", ",", ")", "->", "None", ":", "upsert", "(", "db", ",", "'cache_anime'", ",", "[", "'aid'", "]", ",", "{", "'aid'", ":", "aid",...
Set anime status.
[ "Set", "anime", "status", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/status.py#L74-L85
darkfeline/animanager
animanager/db/query/status.py
get_complete
def get_complete(db) -> Iterator[int]: """Return AID of complete anime.""" cur = db.cursor() cur.execute( """SELECT aid FROM cache_anime WHERE complete=?""", (1,)) for row in cur: yield row[0]
python
def get_complete(db) -> Iterator[int]: """Return AID of complete anime.""" cur = db.cursor() cur.execute( """SELECT aid FROM cache_anime WHERE complete=?""", (1,)) for row in cur: yield row[0]
[ "def", "get_complete", "(", "db", ")", "->", "Iterator", "[", "int", "]", ":", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"\"\"SELECT aid FROM cache_anime\n WHERE complete=?\"\"\"", ",", "(", "1", ",", ")", ")", "for", ...
Return AID of complete anime.
[ "Return", "AID", "of", "complete", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/status.py#L88-L95
gbiggs/rtctree
rtctree/manager.py
Manager.create_component
def create_component(self, module_name): '''Create a component out of a loaded module. Turns a previously-loaded shared module into a component in the manager. This will invalidate any objects that are children of this node. The @ref module_name argument can contain options that set various properties of the new component. These must be appended to the module name, prefixed by a question mark for each property, in key=value format. For example, to change the instance name of the new component, append '?instance_name=new_name' to the module name. @param module_name Name of the module to turn into a component. @raises FailedToCreateComponentError ''' with self._mutex: if not self._obj.create_component(module_name): raise exceptions.FailedToCreateComponentError(module_name) # The list of child components will have changed now, so it must be # reparsed. self._parse_component_children()
python
def create_component(self, module_name): '''Create a component out of a loaded module. Turns a previously-loaded shared module into a component in the manager. This will invalidate any objects that are children of this node. The @ref module_name argument can contain options that set various properties of the new component. These must be appended to the module name, prefixed by a question mark for each property, in key=value format. For example, to change the instance name of the new component, append '?instance_name=new_name' to the module name. @param module_name Name of the module to turn into a component. @raises FailedToCreateComponentError ''' with self._mutex: if not self._obj.create_component(module_name): raise exceptions.FailedToCreateComponentError(module_name) # The list of child components will have changed now, so it must be # reparsed. self._parse_component_children()
[ "def", "create_component", "(", "self", ",", "module_name", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_obj", ".", "create_component", "(", "module_name", ")", ":", "raise", "exceptions", ".", "FailedToCreateComponentError", "(", ...
Create a component out of a loaded module. Turns a previously-loaded shared module into a component in the manager. This will invalidate any objects that are children of this node. The @ref module_name argument can contain options that set various properties of the new component. These must be appended to the module name, prefixed by a question mark for each property, in key=value format. For example, to change the instance name of the new component, append '?instance_name=new_name' to the module name. @param module_name Name of the module to turn into a component. @raises FailedToCreateComponentError
[ "Create", "a", "component", "out", "of", "a", "loaded", "module", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L174-L196
gbiggs/rtctree
rtctree/manager.py
Manager.delete_component
def delete_component(self, instance_name): '''Delete a component. Deletes the component specified by @ref instance_name from the manager. This will invalidate any objects that are children of this node. @param instance_name The instance name of the component to delete. @raises FailedToDeleteComponentError ''' with self._mutex: if self._obj.delete_component(instance_name) != RTC.RTC_OK: raise exceptions.FailedToDeleteComponentError(instance_name) # The list of child components will have changed now, so it must be # reparsed. self._parse_component_children()
python
def delete_component(self, instance_name): '''Delete a component. Deletes the component specified by @ref instance_name from the manager. This will invalidate any objects that are children of this node. @param instance_name The instance name of the component to delete. @raises FailedToDeleteComponentError ''' with self._mutex: if self._obj.delete_component(instance_name) != RTC.RTC_OK: raise exceptions.FailedToDeleteComponentError(instance_name) # The list of child components will have changed now, so it must be # reparsed. self._parse_component_children()
[ "def", "delete_component", "(", "self", ",", "instance_name", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_obj", ".", "delete_component", "(", "instance_name", ")", "!=", "RTC", ".", "RTC_OK", ":", "raise", "exceptions", ".", "FailedT...
Delete a component. Deletes the component specified by @ref instance_name from the manager. This will invalidate any objects that are children of this node. @param instance_name The instance name of the component to delete. @raises FailedToDeleteComponentError
[ "Delete", "a", "component", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L198-L213
gbiggs/rtctree
rtctree/manager.py
Manager.load_module
def load_module(self, path, init_func): '''Load a shared library. Call this function to load a shared library (DLL file under Windows, shared object under UNIX) into the manager. @param path The path to the shared library. @param init_func The name entry function in the library. @raises FailedToLoadModuleError ''' try: with self._mutex: if self._obj.load_module(path, init_func) != RTC.RTC_OK: raise exceptions.FailedToLoadModuleError(path) except CORBA.UNKNOWN as e: if e.args[0] == UNKNOWN_UserException: raise exceptions.FailedToLoadModuleError(path, 'CORBA User Exception') else: raise
python
def load_module(self, path, init_func): '''Load a shared library. Call this function to load a shared library (DLL file under Windows, shared object under UNIX) into the manager. @param path The path to the shared library. @param init_func The name entry function in the library. @raises FailedToLoadModuleError ''' try: with self._mutex: if self._obj.load_module(path, init_func) != RTC.RTC_OK: raise exceptions.FailedToLoadModuleError(path) except CORBA.UNKNOWN as e: if e.args[0] == UNKNOWN_UserException: raise exceptions.FailedToLoadModuleError(path, 'CORBA User Exception') else: raise
[ "def", "load_module", "(", "self", ",", "path", ",", "init_func", ")", ":", "try", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_obj", ".", "load_module", "(", "path", ",", "init_func", ")", "!=", "RTC", ".", "RTC_OK", ":", "raise", ...
Load a shared library. Call this function to load a shared library (DLL file under Windows, shared object under UNIX) into the manager. @param path The path to the shared library. @param init_func The name entry function in the library. @raises FailedToLoadModuleError
[ "Load", "a", "shared", "library", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L215-L234
gbiggs/rtctree
rtctree/manager.py
Manager.unload_module
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
python
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
[ "def", "unload_module", "(", "self", ",", "path", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_obj", ".", "unload_module", "(", "path", ")", "!=", "RTC", ".", "RTC_OK", ":", "raise", "FailedToUnloadModuleError", "(", "path", ")" ]
Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError
[ "Unload", "a", "loaded", "shared", "library", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L236-L248
gbiggs/rtctree
rtctree/manager.py
Manager.components
def components(self): '''The list of components in this manager, if any. This information can also be found by listing the children of this node that are of type @ref Component. That method is more useful as it returns the tree entries for the components. ''' with self._mutex: if not self._components: self._components = [c for c in self.children if c.is_component] return self._components
python
def components(self): '''The list of components in this manager, if any. This information can also be found by listing the children of this node that are of type @ref Component. That method is more useful as it returns the tree entries for the components. ''' with self._mutex: if not self._components: self._components = [c for c in self.children if c.is_component] return self._components
[ "def", "components", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_components", ":", "self", ".", "_components", "=", "[", "c", "for", "c", "in", "self", ".", "children", "if", "c", ".", "is_component", "]", ...
The list of components in this manager, if any. This information can also be found by listing the children of this node that are of type @ref Component. That method is more useful as it returns the tree entries for the components.
[ "The", "list", "of", "components", "in", "this", "manager", "if", "any", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L251-L262