Dataset schema (field, type, min .. max length):

repo               stringlengths    7 .. 55
path               stringlengths    4 .. 223
func_name          stringlengths    1 .. 134
original_string    stringlengths    75 .. 104k
language           stringclasses    1 value
code               stringlengths    75 .. 104k
code_tokens        listlengths      19 .. 28.4k
docstring          stringlengths    1 .. 46.9k
docstring_tokens   listlengths      1 .. 1.97k
sha                stringlengths    40 .. 40
url                stringlengths    87 .. 315
partition          stringclasses    1 value
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI._uses_aiohttp_session
python
def _uses_aiohttp_session(func):
    """This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed.
    Only usable on async functions, of course.
    The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on.
    This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly.
    This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None."""

    # The function the decorator returns
    async def decorated_func(*args, session=None, **kwargs):
        if session is not None:
            # There is a session passed
            return await func(*args, _session=session, **kwargs)
        else:
            # The session argument wasn't passed, so we create our own
            async with aiohttp.ClientSession() as new_session:
                return await func(*args, _session=new_session, **kwargs)

    # We return the decorated func
    return decorated_func
[ "def", "_uses_aiohttp_session", "(", "func", ")", ":", "# The function the decorator returns", "async", "def", "decorated_func", "(", "*", "args", ",", "session", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "session", "is", "not", "None", ":", "# There is a session passed", "return", "await", "func", "(", "*", "args", ",", "_session", "=", "session", ",", "*", "*", "kwargs", ")", "else", ":", "# The session argument wasn't passed, so we create our own", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "new_session", ":", "return", "await", "func", "(", "*", "args", ",", "_session", "=", "new_session", ",", "*", "*", "kwargs", ")", "# We return the decorated func", "return", "decorated_func" ]
This is a decorator that creates an async with statement around a function, and makes sure that a _session argument is always passed. Only usable on async functions, of course. The _session argument is (supposed to be) an aiohttp.ClientSession instance in all functions that this decorator has been used on. This is used to make sure that all session objects are properly entered and exited, or that they are passed into a function properly. This adds a session keyword argument to the method signature, and that session will be used as _session if it is not None.
[ "This", "is", "a", "decorator", "that", "creates", "an", "async", "with", "statement", "around", "a", "function", "and", "makes", "sure", "that", "a", "_session", "argument", "is", "always", "passed", ".", "Only", "usable", "on", "async", "functions", "of", "course", ".", "The", "_session", "argument", "is", "(", "supposed", "to", "be", ")", "an", "aiohttp", ".", "ClientSession", "instance", "in", "all", "functions", "that", "this", "decorator", "has", "been", "used", "on", ".", "This", "is", "used", "to", "make", "sure", "that", "all", "session", "objects", "are", "properly", "entered", "and", "exited", "or", "that", "they", "are", "passed", "into", "a", "function", "properly", ".", "This", "adds", "an", "session", "keyword", "argument", "to", "the", "method", "signature", "and", "that", "session", "will", "be", "used", "as", "_session", "if", "it", "is", "not", "None", "." ]
aba976a3c07c4932de13f4236d924b2901b149b9
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L46-L64
train
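For context, here is a minimal sketch of how a decorator like this is meant to be used. FakeClient and fetch_status are hypothetical stand-ins, not part of the library, and the decorator body is repeated so the sketch runs on its own:

import asyncio
import aiohttp


def _uses_aiohttp_session(func):
    # Same shape as the decorator above, repeated so this sketch is self-contained.
    async def decorated_func(*args, session=None, **kwargs):
        if session is not None:
            return await func(*args, _session=session, **kwargs)
        async with aiohttp.ClientSession() as new_session:
            return await func(*args, _session=new_session, **kwargs)
    return decorated_func


class FakeClient:
    @_uses_aiohttp_session
    async def fetch_status(self, url, _session=None):
        async with _session.get(url) as resp:
            return resp.status


async def main():
    client = FakeClient()
    # No session passed: the decorator opens one and closes it after the call.
    print(await client.fetch_status("https://example.com"))
    # Caller-managed session: reused across calls via the added `session` kwarg.
    async with aiohttp.ClientSession() as s:
        print(await client.fetch_status("https://example.com", session=s))

asyncio.run(main())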
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI._add_request_parameters
python
def _add_request_parameters(func):
    """Adds the ratelimit and request timeout parameters to a function."""

    # The function the decorator returns
    async def decorated_func(*args, handle_ratelimit=None, max_tries=None, request_timeout=None, **kwargs):
        return await func(*args, handle_ratelimit=handle_ratelimit, max_tries=max_tries,
                          request_timeout=request_timeout, **kwargs)

    # We return the decorated func
    return decorated_func
[ "def", "_add_request_parameters", "(", "func", ")", ":", "# The function the decorator returns", "async", "def", "decorated_func", "(", "*", "args", ",", "handle_ratelimit", "=", "None", ",", "max_tries", "=", "None", ",", "request_timeout", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "await", "func", "(", "*", "args", ",", "handle_ratelimit", "=", "handle_ratelimit", ",", "max_tries", "=", "max_tries", ",", "request_timeout", "=", "request_timeout", ",", "*", "*", "kwargs", ")", "# We return the decorated func", "return", "decorated_func" ]
Adds the ratelimit and request timeout parameters to a function.
[ "Adds", "the", "ratelimit", "and", "request", "timeout", "parameters", "to", "a", "function", "." ]
aba976a3c07c4932de13f4236d924b2901b149b9
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L66-L75
train
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI.get_stats
python
async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY), platform=None,
                    _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
    """Returns the stats for the profiles on the specified regions and platform.
    For regions without a matching user, the format is the same as get_profile.
    The stats are returned in a dictionary with a similar format to what
    https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies."""
    if platform is None:
        platform = self.default_platform

    try:
        blob_dict = await self._base_request(battletag, "stats", _session, platform=platform,
                                             handle_ratelimit=handle_ratelimit, max_tries=max_tries,
                                             request_timeout=request_timeout)
    except ProfileNotFoundError as e:
        # The battletag doesn't exist
        blob_dict = {}

    existing_regions = {key: val for key, val in blob_dict.items() if
                        ((val is not None) and (key != "_request"))}
    return {key: [inner_val for inner_key, inner_val in val.items() if inner_key == "stats"][0]
            for key, val in existing_regions.items() if key in regions}
[ "async", "def", "get_stats", "(", "self", ",", "battletag", ":", "str", ",", "regions", "=", "(", "EUROPE", ",", "KOREA", ",", "AMERICAS", ",", "CHINA", ",", "JAPAN", ",", "ANY", ")", ",", "platform", "=", "None", ",", "_session", "=", "None", ",", "handle_ratelimit", "=", "None", ",", "max_tries", "=", "None", ",", "request_timeout", "=", "None", ")", ":", "if", "platform", "is", "None", ":", "platform", "=", "self", ".", "default_platform", "try", ":", "blob_dict", "=", "await", "self", ".", "_base_request", "(", "battletag", ",", "\"stats\"", ",", "_session", ",", "platform", "=", "platform", ",", "handle_ratelimit", "=", "handle_ratelimit", ",", "max_tries", "=", "max_tries", ",", "request_timeout", "=", "request_timeout", ")", "except", "ProfileNotFoundError", "as", "e", ":", "# The battletag doesn't exist", "blob_dict", "=", "{", "}", "existing_regions", "=", "{", "key", ":", "val", "for", "key", ",", "val", "in", "blob_dict", ".", "items", "(", ")", "if", "(", "(", "val", "is", "not", "None", ")", "and", "(", "key", "!=", "\"_request\"", ")", ")", "}", "return", "{", "key", ":", "[", "inner_val", "for", "inner_key", ",", "inner_val", "in", "val", ".", "items", "(", ")", "if", "inner_key", "==", "\"stats\"", "]", "[", "0", "]", "for", "key", ",", "val", "in", "existing_regions", ".", "items", "(", ")", "if", "key", "in", "regions", "}" ]
Returns the stats for the profiles on the specified regions and platform. For regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.
[ "Returns", "the", "stats", "for", "the", "profiles", "on", "the", "specified", "regions", "and", "platform", ".", "The", "format", "for", "regions", "without", "a", "matching", "user", "the", "format", "is", "the", "same", "as", "get_profile", ".", "The", "stats", "are", "returned", "in", "a", "dictionary", "with", "a", "similar", "format", "to", "what", "https", ":", "//", "github", ".", "com", "/", "SunDwarf", "/", "OWAPI", "/", "blob", "/", "master", "/", "api", ".", "md#get", "-", "apiv3ubattletagstats", "specifies", "." ]
aba976a3c07c4932de13f4236d924b2901b149b9
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L98-L114
train
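A hedged usage sketch for the method above. The AsyncOWAPI import path is taken from this record's path field, but the location of the region constants and the battletag are assumptions for illustration:

import asyncio
from overwatch_api.core import AsyncOWAPI
# Region constants are assumed to live in a constants module; adjust to the package's real layout.
from overwatch_api.constants import EUROPE

async def main():
    client = AsyncOWAPI()
    # Returns {region: stats_dict} for the regions where "ExampleTag#1234" exists.
    stats = await client.get_stats("ExampleTag#1234", regions=(EUROPE,))
    print(list(stats.keys()))

asyncio.run(main())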
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI._base_request
python
async def _base_request(self, battle_tag: str, endpoint_name: str, session: aiohttp.ClientSession, *,
                        platform=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
    """Does a request to some endpoint. This is also where ratelimit logic is handled."""
    # We check the different optional arguments, and if they're not passed (are None) we set them to the default for the client object
    if platform is None:
        platform = self.default_platform
    if handle_ratelimit is None:
        handle_ratelimit = self.default_handle_ratelimit
    if max_tries is None:
        max_tries = self.default_max_tries
    if request_timeout is None:
        request_timeout = self.default_request_timeout

    # The battletag with #s removed
    san_battle_tag = self.sanitize_battletag(battle_tag)

    # The ratelimit logic
    for _ in range(max_tries):
        # We execute a request
        try:
            resp_json, status = await self._async_get(
                session,
                self.server_url + self._api_urlpath + "{battle_tag}/{endpoint}".format(
                    battle_tag=san_battle_tag,
                    endpoint=endpoint_name
                ),
                params={"platform": platform},  # Passed to _async_get and indicates what platform we're searching on
                headers={"User-Agent": "overwatch_python_api"},  # According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do
                _async_timeout_seconds=request_timeout)
            if status == 429 and resp_json["msg"] == "you are being ratelimited":
                raise RatelimitError
        except RatelimitError as e:
            # Raised when the server returns a ratelimit response; a timeout instead
            # surfaces as asyncio.TimeoutError from _async_get and propagates.
            # We are ratelimited, so we check if we handle ratelimiting logic.
            # If so, we wait and then execute the next iteration of the loop.
            if handle_ratelimit:
                # We wait to remedy ratelimiting, and we wait a bit more than the response says we should
                await asyncio.sleep(resp_json["retry"] + 1)
                continue
            else:
                raise
        else:
            # We didn't get an error, so we exit the loop because it was a successful request
            break
    else:
        # The loop ran out of iterations without a break, meaning every try was ratelimited
        raise RatelimitError("Got ratelimited on each request until the maximum number of retries was reached.")

    # Validate the response
    if status != 200:
        if status == 404 and resp_json["msg"] == "profile not found":
            raise ProfileNotFoundError(
                "Got HTTP 404, profile not found. This is caused by the given battletag not existing on the specified platform.")
        if status == 429 and resp_json["msg"] == "you are being ratelimited":
            raise RatelimitError(
                "Got HTTP 429, you are being ratelimited. This is caused by calls to the api too frequently.")
        raise ConnectionError("Did not get HTTP status 200, got: {0}".format(status))

    return resp_json
[ "async", "def", "_base_request", "(", "self", ",", "battle_tag", ":", "str", ",", "endpoint_name", ":", "str", ",", "session", ":", "aiohttp", ".", "ClientSession", ",", "*", ",", "platform", "=", "None", ",", "handle_ratelimit", "=", "None", ",", "max_tries", "=", "None", ",", "request_timeout", "=", "None", ")", ":", "# We check the different optional arguments, and if they're not passed (are none) we set them to the default for the client object", "if", "platform", "is", "None", ":", "platform", "=", "self", ".", "default_platform", "if", "handle_ratelimit", "is", "None", ":", "handle_ratelimit", "=", "self", ".", "default_handle_ratelimit", "if", "max_tries", "is", "None", ":", "max_tries", "=", "self", ".", "default_max_tries", "if", "request_timeout", "is", "None", ":", "request_timeout", "=", "self", ".", "default_request_timeout", "# The battletag with #s removed", "san_battle_tag", "=", "self", ".", "sanitize_battletag", "(", "battle_tag", ")", "# The ratelimit logic", "for", "_", "in", "range", "(", "max_tries", ")", ":", "# We execute a request", "try", ":", "resp_json", ",", "status", "=", "await", "self", ".", "_async_get", "(", "session", ",", "self", ".", "server_url", "+", "self", ".", "_api_urlpath", "+", "\"{battle_tag}/{endpoint}\"", ".", "format", "(", "battle_tag", "=", "san_battle_tag", ",", "endpoint", "=", "endpoint_name", ")", ",", "params", "=", "{", "\"platform\"", ":", "platform", "}", ",", "# Passed to _async_get and indicates what platform we're searching on", "headers", "=", "{", "\"User-Agent\"", ":", "\"overwatch_python_api\"", "}", ",", "# According to https://github.com/SunDwarf/OWAPI/blob/master/owapi/v3/v3_util.py#L18 we have to customise our User-Agent, so we do", "_async_timeout_seconds", "=", "request_timeout", ")", "if", "status", "==", "429", "and", "resp_json", "[", "\"msg\"", "]", "==", "\"you are being ratelimited\"", ":", "raise", "RatelimitError", "except", "RatelimitError", "as", "e", ":", "# This excepts both RatelimitErrors and TimeoutErrors, ratelimiterrors for server returning a ratelimit, timeouterrors for the connection not being done in with in the timeout", "# We are ratelimited, so we check if we handle ratelimiting logic", "# If so, we wait and then execute the next iteration of the loop", "if", "handle_ratelimit", ":", "# We wait to remedy ratelimiting, and we wait a bit more than the response says we should", "await", "asyncio", ".", "sleep", "(", "resp_json", "[", "\"retry\"", "]", "+", "1", ")", "continue", "else", ":", "raise", "else", ":", "# We didn't get an error, so we exit the loop because it was a successful request", "break", "else", ":", "# The loop didn't stop because it got breaked, which means that we got ratelimited until the maximum number of tries were finished", "raise", "RatelimitError", "(", "\"Got ratelimited for each requests until the maximum number of retries were reached.\"", ")", "# Validate the response", "if", "status", "!=", "200", ":", "if", "status", "==", "404", "and", "resp_json", "[", "\"msg\"", "]", "==", "\"profile not found\"", ":", "raise", "ProfileNotFoundError", "(", "\"Got HTTP 404, profile not found. This is caused by the given battletag not existing on the specified platform.\"", ")", "if", "status", "==", "429", "and", "resp_json", "[", "\"msg\"", "]", "==", "\"you are being ratelimited\"", ":", "raise", "RatelimitError", "(", "\"Got HTTP 429, you are being ratelimited. This is caused by calls to the api too frequently.\"", ")", "raise", "ConnectionError", "(", "\"Did not get HTTP status 200, got: {0}\"", ".", "format", "(", "status", ")", ")", "return", "resp_json" ]
Does a request to some endpoint. This is also where ratelimit logic is handled.
[ "Does", "a", "request", "to", "some", "endpoint", ".", "This", "is", "also", "where", "ratelimit", "logic", "is", "handled", "." ]
aba976a3c07c4932de13f4236d924b2901b149b9
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L164-L224
train
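The for ... else retry shape above is easy to misread, so here is a stripped-down synchronous sketch of just that pattern; do_request is a stand-in for the HTTP call:

import time

def fetch_with_retries(do_request, max_tries=3):
    for _ in range(max_tries):
        payload, status = do_request()
        if status == 429:
            # Ratelimited: sleep slightly longer than the server asks, then retry.
            time.sleep(payload.get("retry", 0) + 1)
            continue
        break  # success: skip the else clause below
    else:
        # Only runs if the loop finished without ever hitting `break`,
        # i.e. every single try was ratelimited.
        raise RuntimeError("ratelimited on every attempt")
    return payload, status

responses = iter([({"retry": 0}, 429), ({"ok": True}, 200)])
print(fetch_with_retries(lambda: next(responses)))  # ({'ok': True}, 200)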
anthok/overwatch-api
overwatch_api/core.py
AsyncOWAPI._async_get
python
async def _async_get(self, session: aiohttp.ClientSession, *args, _async_timeout_seconds: int = 5, **kwargs):
    """Uses aiohttp to make a get request asynchronously.
    Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds."""
    # Taken almost directly from the aiohttp tutorial
    with async_timeout.timeout(_async_timeout_seconds):
        async with session.get(*args, **kwargs) as response:
            return await response.json(), response.status
[ "async", "def", "_async_get", "(", "self", ",", "session", ":", "aiohttp", ".", "ClientSession", ",", "*", "args", ",", "_async_timeout_seconds", ":", "int", "=", "5", ",", "*", "*", "kwargs", ")", ":", "# Taken almost directly from the aiohttp tutorial", "with", "async_timeout", ".", "timeout", "(", "_async_timeout_seconds", ")", ":", "async", "with", "session", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "response", ":", "return", "await", "response", ".", "json", "(", ")", ",", "response", ".", "status" ]
Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds.
[ "Uses", "aiohttp", "to", "make", "a", "get", "request", "asynchronously", ".", "Will", "raise", "asyncio", ".", "TimeoutError", "if", "the", "request", "could", "not", "be", "completed", "within", "_async_timeout_seconds", "(", "default", "5", ")", "seconds", "." ]
aba976a3c07c4932de13f4236d924b2901b149b9
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L226-L235
train
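A self-contained sketch of the same timeout-plus-GET pattern. Note the original uses the plain `with` form that older async_timeout releases accepted; current releases want `async with`, which is what this sketch uses. The URL is just an example endpoint:

import asyncio
import aiohttp
import async_timeout

async def get_json(url, timeout_seconds=5):
    async with aiohttp.ClientSession() as session:
        # Raises asyncio.TimeoutError if the whole request takes too long.
        async with async_timeout.timeout(timeout_seconds):
            async with session.get(url) as response:
                return await response.json(), response.status

print(asyncio.run(get_json("https://httpbin.org/json")))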
Xion/callee
callee/objects.py
is_method
python
def is_method(arg, min_arity=None, max_arity=None):
    """Check if argument is a method.

    Optionally, we can also check if minimum or maximum arities
    (number of accepted arguments) match given minimum and/or maximum.
    """
    if not callable(arg):
        return False
    if not any(is_(arg) for is_ in (inspect.ismethod,
                                    inspect.ismethoddescriptor,
                                    inspect.isbuiltin)):
        return False

    try:
        argnames, varargs, kwargs, defaults = getargspec(arg)
    except TypeError:
        # On CPython 2.x, built-in methods of file aren't inspectable,
        # so if it's file.read() or file.write(), we can't tell it for sure.
        # Given how this check is being used, assuming the best is probably
        # all we can do here.
        return True
    else:
        if argnames and argnames[0] == 'self':
            argnames = argnames[1:]
        if min_arity is not None:
            actual_min_arity = len(argnames) - len(defaults or ())
            assert actual_min_arity >= 0, (
                "Minimum arity of %r found to be negative (got %s)!" % (
                    arg, actual_min_arity))
            if int(min_arity) != actual_min_arity:
                return False
        if max_arity is not None:
            actual_max_arity = sys.maxsize if varargs or kwargs else len(argnames)
            if int(max_arity) != actual_max_arity:
                return False

    return True
[ "def", "is_method", "(", "arg", ",", "min_arity", "=", "None", ",", "max_arity", "=", "None", ")", ":", "if", "not", "callable", "(", "arg", ")", ":", "return", "False", "if", "not", "any", "(", "is_", "(", "arg", ")", "for", "is_", "in", "(", "inspect", ".", "ismethod", ",", "inspect", ".", "ismethoddescriptor", ",", "inspect", ".", "isbuiltin", ")", ")", ":", "return", "False", "try", ":", "argnames", ",", "varargs", ",", "kwargs", ",", "defaults", "=", "getargspec", "(", "arg", ")", "except", "TypeError", ":", "# On CPython 2.x, built-in methods of file aren't inspectable,", "# so if it's file.read() or file.write(), we can't tell it for sure.", "# Given how this check is being used, assuming the best is probably", "# all we can do here.", "return", "True", "else", ":", "if", "argnames", "and", "argnames", "[", "0", "]", "==", "'self'", ":", "argnames", "=", "argnames", "[", "1", ":", "]", "if", "min_arity", "is", "not", "None", ":", "actual_min_arity", "=", "len", "(", "argnames", ")", "-", "len", "(", "defaults", "or", "(", ")", ")", "assert", "actual_min_arity", ">=", "0", ",", "(", "\"Minimum arity of %r found to be negative (got %s)!\"", "%", "(", "arg", ",", "actual_min_arity", ")", ")", "if", "int", "(", "min_arity", ")", "!=", "actual_min_arity", ":", "return", "False", "if", "max_arity", "is", "not", "None", ":", "actual_max_arity", "=", "sys", ".", "maxsize", "if", "varargs", "or", "kwargs", "else", "len", "(", "argnames", ")", "if", "int", "(", "max_arity", ")", "!=", "actual_max_arity", ":", "return", "False", "return", "True" ]
Check if argument is a method. Optionally, we can also check if minimum or maximum arities (number of accepted arguments) match given minimum and/or maximum.
[ "Check", "if", "argument", "is", "a", "method", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/objects.py#L115-L154
train
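A quick usage sketch for is_method as defined above, assuming a Python version where the getargspec shim it relies on still works; Greeter is a made-up class:

from callee.objects import is_method

class Greeter:
    def greet(self, name, punctuation='!'):
        return 'Hello ' + name + punctuation

g = Greeter()
print(is_method(g.greet))                            # True: a bound method
print(is_method(g.greet, min_arity=1, max_arity=2))  # True: `name` required, `punctuation` optional
print(is_method(g.greet, max_arity=1))               # False: it accepts up to two arguments
print(is_method(lambda x: x))                        # False: a plain function, not a method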
Xion/callee
callee/objects.py
FileLike._is_readable
python
def _is_readable(self, obj):
    """Check if the argument is a readable file-like object."""
    try:
        read = getattr(obj, 'read')
    except AttributeError:
        return False
    else:
        return is_method(read, max_arity=1)
[ "def", "_is_readable", "(", "self", ",", "obj", ")", ":", "try", ":", "read", "=", "getattr", "(", "obj", ",", "'read'", ")", "except", "AttributeError", ":", "return", "False", "else", ":", "return", "is_method", "(", "read", ",", "max_arity", "=", "1", ")" ]
Check if the argument is a readable file-like object.
[ "Check", "if", "the", "argument", "is", "a", "readable", "file", "-", "like", "object", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/objects.py#L85-L92
train
Xion/callee
callee/objects.py
FileLike._is_writable
python
def _is_writable(self, obj):
    """Check if the argument is a writable file-like object."""
    try:
        write = getattr(obj, 'write')
    except AttributeError:
        return False
    else:
        return is_method(write, min_arity=1, max_arity=1)
[ "def", "_is_writable", "(", "self", ",", "obj", ")", ":", "try", ":", "write", "=", "getattr", "(", "obj", ",", "'write'", ")", "except", "AttributeError", ":", "return", "False", "else", ":", "return", "is_method", "(", "write", ",", "min_arity", "=", "1", ",", "max_arity", "=", "1", ")" ]
Check if the argument is a writable file-like object.
[ "Check", "if", "the", "argument", "is", "a", "writable", "file", "-", "like", "object", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/objects.py#L94-L101
train
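To see the duck-typing idea behind _is_readable/_is_writable in isolation, here is a dependency-free sketch that keeps the attribute lookup but drops the arity check:

import io

def looks_writable(obj):
    # Mirrors FileLike._is_writable above, minus the is_method arity check.
    return callable(getattr(obj, 'write', None))

print(looks_writable(io.StringIO()))  # True: in-memory text buffer
print(looks_writable(b'raw bytes'))   # False: bytes have no write()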
scivision/msise00
msise00/base.py
run
python
def run(time: datetime, altkm: float,
        glat: Union[float, np.ndarray], glon: Union[float, np.ndarray], *,
        f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)  # has to be here
    # %% altitude 1-D
    if glat.size == 1 and glon.size == 1 and isinstance(time, (str, date, datetime, np.datetime64)):
        atmos = rungtd1d(time, altkm, glat.squeeze()[()], glon.squeeze()[()],
                         f107a=f107a, f107=f107, Ap=Ap)
    # %% lat/lon grid at 1 altitude
    else:
        atmos = loopalt_gtd(time, glat, glon, altkm,
                            f107a=f107a, f107=f107, Ap=Ap)

    return atmos
[ "def", "run", "(", "time", ":", "datetime", ",", "altkm", ":", "float", ",", "glat", ":", "Union", "[", "float", ",", "np", ".", "ndarray", "]", ",", "glon", ":", "Union", "[", "float", ",", "np", ".", "ndarray", "]", ",", "*", ",", "f107a", ":", "float", "=", "None", ",", "f107", ":", "float", "=", "None", ",", "Ap", ":", "int", "=", "None", ")", "->", "xarray", ".", "Dataset", ":", "glat", "=", "np", ".", "atleast_2d", "(", "glat", ")", "glon", "=", "np", ".", "atleast_2d", "(", "glon", ")", "# has to be here", "# %% altitude 1-D", "if", "glat", ".", "size", "==", "1", "and", "glon", ".", "size", "==", "1", "and", "isinstance", "(", "time", ",", "(", "str", ",", "date", ",", "datetime", ",", "np", ".", "datetime64", ")", ")", ":", "atmos", "=", "rungtd1d", "(", "time", ",", "altkm", ",", "glat", ".", "squeeze", "(", ")", "[", "(", ")", "]", ",", "glon", ".", "squeeze", "(", ")", "[", "(", ")", "]", ",", "f107a", "=", "f107a", ",", "f107", "=", "f107", ",", "Ap", "=", "Ap", ")", "# %% lat/lon grid at 1 altitude", "else", ":", "atmos", "=", "loopalt_gtd", "(", "time", ",", "glat", ",", "glon", ",", "altkm", ",", "f107a", "=", "f107a", ",", "f107", "=", "f107", ",", "Ap", "=", "Ap", ")", "return", "atmos" ]
loops the rungtd1d function below. Figure it's easier to troubleshoot in Python than Fortran.
[ "loops", "the", "rungtd1d", "function", "below", ".", "Figure", "it", "s", "easier", "to", "troubleshoot", "in", "Python", "than", "Fortran", "." ]
13a283ec02679ab74672f284ba68a7a8f896dc6f
https://github.com/scivision/msise00/blob/13a283ec02679ab74672f284ba68a7a8f896dc6f/msise00/base.py#L34-L51
train
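A hedged usage sketch for run. The import path follows this record's path field; it assumes the compiled MSISE executable is available, and the date and location are arbitrary:

from datetime import datetime
from msise00.base import run

# Single time, single location, single altitude: takes the rungtd1d branch.
atmos = run(time=datetime(2017, 3, 1, 12), altkm=150., glat=65., glon=-148.)
print(atmos['Tn'].squeeze().item())  # neutral temperature at 150 km, in Kelvin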
scivision/msise00
msise00/base.py
loopalt_gtd
python
def loopalt_gtd(time: datetime,
                glat: Union[float, np.ndarray], glon: Union[float, np.ndarray],
                altkm: Union[float, List[float], np.ndarray], *,
                f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    loop over location and time

    time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime
    glat: float or 2-D np.ndarray
    glon: float or 2-D np.ndarray
    altkm: float or list or 1-D np.ndarray
    """
    glat = np.atleast_2d(glat)
    glon = np.atleast_2d(glon)
    assert glat.ndim == glon.ndim == 2

    times = np.atleast_1d(time)
    assert times.ndim == 1

    atmos = xarray.Dataset()
    for k, t in enumerate(times):
        print('computing', t)
        for i in range(glat.shape[0]):
            for j in range(glat.shape[1]):
                # atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])),
                #                       data_vars='minimal', coords='minimal', dim='lon')
                atm = rungtd1d(t, altkm, glat[i, j], glon[i, j],
                               f107a=f107a, f107=f107, Ap=Ap)
                atmos = xarray.merge((atmos, atm))

    atmos.attrs = atm.attrs

    return atmos
[ "def", "loopalt_gtd", "(", "time", ":", "datetime", ",", "glat", ":", "Union", "[", "float", ",", "np", ".", "ndarray", "]", ",", "glon", ":", "Union", "[", "float", ",", "np", ".", "ndarray", "]", ",", "altkm", ":", "Union", "[", "float", ",", "List", "[", "float", "]", ",", "np", ".", "ndarray", "]", ",", "*", ",", "f107a", ":", "float", "=", "None", ",", "f107", ":", "float", "=", "None", ",", "Ap", ":", "int", "=", "None", ")", "->", "xarray", ".", "Dataset", ":", "glat", "=", "np", ".", "atleast_2d", "(", "glat", ")", "glon", "=", "np", ".", "atleast_2d", "(", "glon", ")", "assert", "glat", ".", "ndim", "==", "glon", ".", "ndim", "==", "2", "times", "=", "np", ".", "atleast_1d", "(", "time", ")", "assert", "times", ".", "ndim", "==", "1", "atmos", "=", "xarray", ".", "Dataset", "(", ")", "for", "k", ",", "t", "in", "enumerate", "(", "times", ")", ":", "print", "(", "'computing'", ",", "t", ")", "for", "i", "in", "range", "(", "glat", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "glat", ".", "shape", "[", "1", "]", ")", ":", "# atmos = xarray.concat((atmos, rungtd1d(t, altkm, glat[i,j], glon[i,j])),", "# data_vars='minimal',coords='minimal',dim='lon')", "atm", "=", "rungtd1d", "(", "t", ",", "altkm", ",", "glat", "[", "i", ",", "j", "]", ",", "glon", "[", "i", ",", "j", "]", ",", "f107a", "=", "f107a", ",", "f107", "=", "f107", ",", "Ap", "=", "Ap", ")", "atmos", "=", "xarray", ".", "merge", "(", "(", "atmos", ",", "atm", ")", ")", "atmos", ".", "attrs", "=", "atm", ".", "attrs", "return", "atmos" ]
loop over location and time time: datetime or numpy.datetime64 or list of datetime or np.ndarray of datetime glat: float or 2-D np.ndarray glon: float or 2-D np.ndarray altkm: float or list or 1-D np.ndarray
[ "loop", "over", "location", "and", "time" ]
13a283ec02679ab74672f284ba68a7a8f896dc6f
https://github.com/scivision/msise00/blob/13a283ec02679ab74672f284ba68a7a8f896dc6f/msise00/base.py#L54-L87
train
scivision/msise00
msise00/base.py
rungtd1d
python
def rungtd1d(time: datetime, altkm: np.ndarray,
             glat: float, glon: float, *,
             f107a: float = None, f107: float = None, Ap: int = None) -> xarray.Dataset:
    """
    This is the "atomic" function looped by other functions
    """
    time = todatetime(time)
    # %% get solar parameters for date
    if f107a and f107 and Ap:
        pass
    else:
        f107Ap = gi.getApF107(time, smoothdays=81)
        f107a = f107Ap['f107s'].item()
        f107 = f107Ap['f107'].item()
        Ap = f107Ap['Ap'].item()
    # %% dimensions
    altkm = np.atleast_1d(altkm)
    assert altkm.ndim == 1
    assert isinstance(glon, (int, float))
    assert isinstance(glat, (int, float))
    # %%
    iyd = time.strftime('%y%j')
    # %%
    dens = np.empty((altkm.size, len(species)))
    temp = np.empty((altkm.size, len(ttypes)))
    for i, a in enumerate(altkm):
        cmd = [str(EXE), iyd,
               str(time.hour), str(time.minute), str(time.second),
               str(glat), str(glon),
               str(f107a), str(f107), str(Ap), str(a)]
        ret = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.DEVNULL)

        f = io.StringIO(ret)
        dens[i, :] = np.genfromtxt(f, max_rows=1)
        temp[i, :] = np.genfromtxt(f, max_rows=1)

    dsf = {k: (('time', 'alt_km', 'lat', 'lon'), v[None, :, None, None])
           for (k, v) in zip(species, dens.T)}
    dsf.update({'Tn': (('time', 'alt_km', 'lat', 'lon'), temp[:, 1][None, :, None, None]),
                'Texo': (('time', 'alt_km', 'lat', 'lon'), temp[:, 0][None, :, None, None])})

    atmos = xarray.Dataset(dsf,
                           coords={'time': [time], 'alt_km': altkm,
                                   'lat': [glat], 'lon': [glon]},
                           attrs={'Ap': Ap, 'f107': f107, 'f107a': f107a,
                                  'species': species})

    return atmos
[ "def", "rungtd1d", "(", "time", ":", "datetime", ",", "altkm", ":", "np", ".", "ndarray", ",", "glat", ":", "float", ",", "glon", ":", "float", ",", "*", ",", "f107a", ":", "float", "=", "None", ",", "f107", ":", "float", "=", "None", ",", "Ap", ":", "int", "=", "None", ")", "->", "xarray", ".", "Dataset", ":", "time", "=", "todatetime", "(", "time", ")", "# %% get solar parameters for date", "if", "f107a", "and", "f107a", "and", "Ap", ":", "pass", "else", ":", "f107Ap", "=", "gi", ".", "getApF107", "(", "time", ",", "smoothdays", "=", "81", ")", "f107a", "=", "f107Ap", "[", "'f107s'", "]", ".", "item", "(", ")", "f107", "=", "f107Ap", "[", "'f107'", "]", ".", "item", "(", ")", "Ap", "=", "f107Ap", "[", "'Ap'", "]", ".", "item", "(", ")", "# %% dimensions", "altkm", "=", "np", ".", "atleast_1d", "(", "altkm", ")", "assert", "altkm", ".", "ndim", "==", "1", "assert", "isinstance", "(", "glon", ",", "(", "int", ",", "float", ")", ")", "assert", "isinstance", "(", "glat", ",", "(", "int", ",", "float", ")", ")", "# %%", "iyd", "=", "time", ".", "strftime", "(", "'%y%j'", ")", "altkm", "=", "np", ".", "atleast_1d", "(", "altkm", ")", "# %%", "dens", "=", "np", ".", "empty", "(", "(", "altkm", ".", "size", ",", "len", "(", "species", ")", ")", ")", "temp", "=", "np", ".", "empty", "(", "(", "altkm", ".", "size", ",", "len", "(", "ttypes", ")", ")", ")", "for", "i", ",", "a", "in", "enumerate", "(", "altkm", ")", ":", "cmd", "=", "[", "str", "(", "EXE", ")", ",", "iyd", ",", "str", "(", "time", ".", "hour", ")", ",", "str", "(", "time", ".", "minute", ")", ",", "str", "(", "time", ".", "second", ")", ",", "str", "(", "glat", ")", ",", "str", "(", "glon", ")", ",", "str", "(", "f107a", ")", ",", "str", "(", "f107", ")", ",", "str", "(", "Ap", ")", ",", "str", "(", "a", ")", "]", "ret", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "universal_newlines", "=", "True", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "f", "=", "io", ".", "StringIO", "(", "ret", ")", "dens", "[", "i", ",", ":", "]", "=", "np", ".", "genfromtxt", "(", "f", ",", "max_rows", "=", "1", ")", "temp", "[", "i", ",", ":", "]", "=", "np", ".", "genfromtxt", "(", "f", ",", "max_rows", "=", "1", ")", "dsf", "=", "{", "k", ":", "(", "(", "'time'", ",", "'alt_km'", ",", "'lat'", ",", "'lon'", ")", ",", "v", "[", "None", ",", ":", ",", "None", ",", "None", "]", ")", "for", "(", "k", ",", "v", ")", "in", "zip", "(", "species", ",", "dens", ".", "T", ")", "}", "dsf", ".", "update", "(", "{", "'Tn'", ":", "(", "(", "'time'", ",", "'alt_km'", ",", "'lat'", ",", "'lon'", ")", ",", "temp", "[", ":", ",", "1", "]", "[", "None", ",", ":", ",", "None", ",", "None", "]", ")", ",", "'Texo'", ":", "(", "(", "'time'", ",", "'alt_km'", ",", "'lat'", ",", "'lon'", ")", ",", "temp", "[", ":", ",", "0", "]", "[", "None", ",", ":", ",", "None", ",", "None", "]", ")", "}", ")", "atmos", "=", "xarray", ".", "Dataset", "(", "dsf", ",", "coords", "=", "{", "'time'", ":", "[", "time", "]", ",", "'alt_km'", ":", "altkm", ",", "'lat'", ":", "[", "glat", "]", ",", "'lon'", ":", "[", "glon", "]", ",", "}", ",", "attrs", "=", "{", "'Ap'", ":", "Ap", ",", "'f107'", ":", "f107", ",", "'f107a'", ":", "f107a", ",", "'species'", ":", "species", "}", ")", "return", "atmos" ]
This is the "atomic" function looped by other functions
[ "This", "is", "the", "atomic", "function", "looped", "by", "other", "functions" ]
13a283ec02679ab74672f284ba68a7a8f896dc6f
https://github.com/scivision/msise00/blob/13a283ec02679ab74672f284ba68a7a8f896dc6f/msise00/base.py#L90-L140
train
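One subtle step above is parsing two consecutive lines of the executable's stdout with two np.genfromtxt(..., max_rows=1) calls on the same handle. A tiny sketch with fake output (the real MSISE column counts differ) shows why that works:

import io
import numpy as np

fake_stdout = "1.0 2.0 3.0\n4.0 5.0\n"   # pretend densities line, then temperatures line
f = io.StringIO(fake_stdout)
dens_row = np.genfromtxt(f, max_rows=1)  # reads and consumes the first line
temp_row = np.genfromtxt(f, max_rows=1)  # the handle has advanced, so this gets line two
print(dens_row, temp_row)                # [1. 2. 3.] [4. 5.]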
Xion/callee
callee/general.py
Matching._validate_desc
python
def _validate_desc(self, desc):
    """Validate the predicate description."""
    if desc is None:
        return desc

    if not isinstance(desc, STRING_TYPES):
        raise TypeError(
            "predicate description for Matching must be a string, "
            "got %r" % (type(desc),))

    # Python 2 mandates __repr__ to be an ASCII string,
    # so if Unicode is passed (usually due to unicode_literals),
    # it should be ASCII-encodable.
    if not IS_PY3 and isinstance(desc, unicode):
        try:
            desc = desc.encode('ascii', errors='strict')
        except UnicodeEncodeError:
            raise TypeError("predicate description must be "
                            "an ASCII string in Python 2")

    return desc
[ "def", "_validate_desc", "(", "self", ",", "desc", ")", ":", "if", "desc", "is", "None", ":", "return", "desc", "if", "not", "isinstance", "(", "desc", ",", "STRING_TYPES", ")", ":", "raise", "TypeError", "(", "\"predicate description for Matching must be a string, \"", "\"got %r\"", "%", "(", "type", "(", "desc", ")", ",", ")", ")", "# Python 2 mandates __repr__ to be an ASCII string,", "# so if Unicode is passed (usually due to unicode_literals),", "# it should be ASCII-encodable.", "if", "not", "IS_PY3", "and", "isinstance", "(", "desc", ",", "unicode", ")", ":", "try", ":", "desc", "=", "desc", ".", "encode", "(", "'ascii'", ",", "errors", "=", "'strict'", ")", "except", "UnicodeEncodeError", ":", "raise", "TypeError", "(", "\"predicate description must be \"", "\"an ASCII string in Python 2\"", ")", "return", "desc" ]
Validate the predicate description.
[ "Validate", "the", "predicate", "description", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/general.py#L54-L74
train
Xion/callee
callee/operators.py
OperatorMatcher._get_placeholder_repr
python
def _get_placeholder_repr(self):
    """Return the placeholder part of matcher's ``__repr__``."""
    placeholder = '...'
    if self.TRANSFORM is not None:
        placeholder = '%s(%s)' % (self.TRANSFORM.__name__, placeholder)
    return placeholder
[ "def", "_get_placeholder_repr", "(", "self", ")", ":", "placeholder", "=", "'...'", "if", "self", ".", "TRANSFORM", "is", "not", "None", ":", "placeholder", "=", "'%s(%s)'", "%", "(", "self", ".", "TRANSFORM", ".", "__name__", ",", "placeholder", ")", "return", "placeholder" ]
Return the placeholder part of matcher's ``__repr__``.
[ "Return", "the", "placeholder", "part", "of", "matcher", "s", "__repr__", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/operators.py#L104-L109
train
Xion/callee
callee/base.py
BaseMatcherMetaclass._validate_class_definition
python
def _validate_class_definition(meta, classname, bases, dict_):
    """Ensure the matcher class definition is acceptable.

    :raise RuntimeError: If there is a problem
    """
    # let the BaseMatcher class be created without hassle
    if meta._is_base_matcher_class_definition(classname, dict_):
        return

    # ensure that no important magic methods are being overridden
    for name, member in dict_.items():
        if not (name.startswith('__') and name.endswith('__')):
            continue

        # check if it's not a whitelisted magic method name
        name = name[2:-2]
        if not name:
            continue  # unlikely case of a ``____`` function
        if name not in meta._list_magic_methods(BaseMatcher):
            continue
        if name in meta.USER_OVERRIDABLE_MAGIC_METHODS:
            continue

        # non-function attributes, like __slots__, are harmless
        if not inspect.isfunction(member):
            continue

        # classes in this very module are exempt, since they define
        # the very behavior of matchers we want to protect
        if member.__module__ == __name__:
            continue

        raise RuntimeError(
            "matcher class %s cannot override the __%s__ method" % (
                classname, name))
[ "def", "_validate_class_definition", "(", "meta", ",", "classname", ",", "bases", ",", "dict_", ")", ":", "# let the BaseMatcher class be created without hassle", "if", "meta", ".", "_is_base_matcher_class_definition", "(", "classname", ",", "dict_", ")", ":", "return", "# ensure that no important magic methods are being overridden", "for", "name", ",", "member", "in", "dict_", ".", "items", "(", ")", ":", "if", "not", "(", "name", ".", "startswith", "(", "'__'", ")", "and", "name", ".", "endswith", "(", "'__'", ")", ")", ":", "continue", "# check if it's not a whitelisted magic method name", "name", "=", "name", "[", "2", ":", "-", "2", "]", "if", "not", "name", ":", "continue", "# unlikely case of a ``____`` function", "if", "name", "not", "in", "meta", ".", "_list_magic_methods", "(", "BaseMatcher", ")", ":", "continue", "if", "name", "in", "meta", ".", "USER_OVERRIDABLE_MAGIC_METHODS", ":", "continue", "# non-function attributes, like __slots__, are harmless", "if", "not", "inspect", ".", "isfunction", "(", "member", ")", ":", "continue", "# classes in this very module are exempt, since they define", "# the very behavior of matchers we want to protect", "if", "member", ".", "__module__", "==", "__name__", ":", "continue", "raise", "RuntimeError", "(", "\"matcher class %s cannot override the __%s__ method\"", "%", "(", "classname", ",", "name", ")", ")" ]
Ensure the matcher class definition is acceptable. :raise RuntimeError: If there is a problem
[ "Ensure", "the", "matcher", "class", "definition", "is", "acceptable", ".", ":", "raise", "RuntimeError", ":", "If", "there", "is", "a", "problem" ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/base.py#L38-L71
train
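A toy, self-contained version of the guard above: a metaclass that lets the root class define __eq__ but raises when a subclass tries to override it. It is a sketch of the mechanism, not the real BaseMatcherMetaclass logic:

import inspect

class GuardedMeta(type):
    def __new__(meta, classname, bases, dict_):
        # `bases` is empty only for the root class, which may define __eq__.
        if bases and inspect.isfunction(dict_.get('__eq__')):
            raise RuntimeError(
                "matcher class %s cannot override the __eq__ method" % classname)
        return super().__new__(meta, classname, bases, dict_)

class Base(metaclass=GuardedMeta):
    def __eq__(self, other):
        return NotImplemented

try:
    class Evil(Base):
        def __eq__(self, other):
            return True
except RuntimeError as exc:
    print(exc)  # matcher class Evil cannot override the __eq__ method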
Xion/callee
callee/base.py
BaseMatcherMetaclass._is_base_matcher_class_definition
python
def _is_base_matcher_class_definition(meta, classname, dict_):
    """Checks whether given class name and dictionary
    define the :class:`BaseMatcher`.
    """
    if classname != 'BaseMatcher':
        return False
    methods = list(filter(inspect.isfunction, dict_.values()))
    return methods and all(m.__module__ == __name__ for m in methods)
[ "def", "_is_base_matcher_class_definition", "(", "meta", ",", "classname", ",", "dict_", ")", ":", "if", "classname", "!=", "'BaseMatcher'", ":", "return", "False", "methods", "=", "list", "(", "filter", "(", "inspect", ".", "isfunction", ",", "dict_", ".", "values", "(", ")", ")", ")", "return", "methods", "and", "all", "(", "m", ".", "__module__", "==", "__name__", "for", "m", "in", "methods", ")" ]
Checks whether given class name and dictionary define the :class:`BaseMatcher`.
[ "Checks", "whether", "given", "class", "name", "and", "dictionary", "define", "the", ":", "class", ":", "BaseMatcher", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/base.py#L74-L81
train
Xion/callee
callee/base.py
BaseMatcherMetaclass._list_magic_methods
python
def _list_magic_methods(meta, class_):
    """Return names of magic methods defined by a class.

    :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix
    """
    return [
        name[2:-2]
        for name, member in class_.__dict__.items()
        if len(name) > 4 and
        name.startswith('__') and name.endswith('__') and
        inspect.isfunction(member)
    ]
[ "def", "_list_magic_methods", "(", "meta", ",", "class_", ")", ":", "return", "[", "name", "[", "2", ":", "-", "2", "]", "for", "name", ",", "member", "in", "class_", ".", "__dict__", ".", "items", "(", ")", "if", "len", "(", "name", ")", ">", "4", "and", "name", ".", "startswith", "(", "'__'", ")", "and", "name", ".", "endswith", "(", "'__'", ")", "and", "inspect", ".", "isfunction", "(", "member", ")", "]" ]
Return names of magic methods defined by a class. :return: Iterable of magic methods, each w/o the ``__`` prefix/suffix
[ "Return", "names", "of", "magic", "methods", "defined", "by", "a", "class", ".", ":", "return", ":", "Iterable", "of", "magic", "methods", "each", "w", "/", "o", "the", "__", "prefix", "/", "suffix" ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/base.py#L84-L92
train
podhmo/python-semver
semver/__init__.py
semver
python
def semver(version, loose):
    if isinstance(version, SemVer):
        if version.loose == loose:
            return version
        else:
            version = version.version
    elif not isinstance(version, str):
        # xxx:
        raise InvalidTypeIncluded("must be str, but {!r}".format(version))

    """
    if (!(this instanceof SemVer))
        return new SemVer(version, loose);
    """
    return SemVer(version, loose)
[ "def", "semver", "(", "version", ",", "loose", ")", ":", "if", "isinstance", "(", "version", ",", "SemVer", ")", ":", "if", "version", ".", "loose", "==", "loose", ":", "return", "version", "else", ":", "version", "=", "version", ".", "version", "elif", "not", "isinstance", "(", "version", ",", "str", ")", ":", "# xxx:", "raise", "InvalidTypeIncluded", "(", "\"must be str, but {!r}\"", ".", "format", "(", "version", ")", ")", "return", "SemVer", "(", "version", ",", "loose", ")" ]
if (!(this instanceof SemVer)) return new SemVer(version, loose);
[ "if", "(", "!", "(", "this", "instanceof", "SemVer", "))", "return", "new", "SemVer", "(", "version", "loose", ")", ";" ]
46d81c5d70ee716c48c7a8d44f9fefc6b86be33c
https://github.com/podhmo/python-semver/blob/46d81c5d70ee716c48c7a8d44f9fefc6b86be33c/semver/__init__.py#L288-L301
train
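How the three branches above play out in practice, as a hedged sketch; the import path follows this record's path field:

from semver import semver

v = semver("1.2.3", False)    # plain string: parsed into a new SemVer
assert semver(v, False) is v  # same loose flag: the instance is returned as-is
w = semver(v, True)           # different loose flag: re-parsed from v.version
assert w is not v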
Xion/callee
docs/conf.py
autodoc_process_docstring
python
def autodoc_process_docstring(app, what, name, obj, options, lines):
    """Handler for the event emitted when autodoc processes a docstring.
    See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring.

    The TL;DR is that we can modify ``lines`` in-place to influence the output.
    """
    # check that only symbols that can be directly imported from ``callee``
    # package are being documented
    _, symbol = name.rsplit('.', 1)
    if symbol not in callee.__all__:
        raise SphinxError(
            "autodoc'd '%s' is not a part of the public API!" % name)

    # for classes exempt from automatic merging of class & __init__ docs,
    # pretend their __init__ methods have no docstring at all,
    # so that nothing will be appended to the class's docstring
    if what == 'class' and name in autoclass_content_exceptions:
        # amusingly, when autodoc reads the constructor's docstring
        # for appending it to class docstring, it will report ``what``
        # as 'class' (again!); hence we must check what it actually read
        ctor_docstring_lines = prepare_docstring(obj.__init__.__doc__)
        if lines == ctor_docstring_lines:
            lines[:] = []
[ "def", "autodoc_process_docstring", "(", "app", ",", "what", ",", "name", ",", "obj", ",", "options", ",", "lines", ")", ":", "# check that only symbols that can be directly imported from ``callee``", "# package are being documented", "_", ",", "symbol", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "if", "symbol", "not", "in", "callee", ".", "__all__", ":", "raise", "SphinxError", "(", "\"autodoc'd '%s' is not a part of the public API!\"", "%", "name", ")", "# for classes exempt from automatic merging of class & __init__ docs,", "# pretend their __init__ methods have no docstring at all,", "# so that nothing will be appended to the class's docstring", "if", "what", "==", "'class'", "and", "name", "in", "autoclass_content_exceptions", ":", "# amusingly, when autodoc reads the constructor's docstring", "# for appending it to class docstring, it will report ``what``", "# as 'class' (again!); hence we must check what it actually read", "ctor_docstring_lines", "=", "prepare_docstring", "(", "obj", ".", "__init__", ".", "__doc__", ")", "if", "lines", "==", "ctor_docstring_lines", ":", "lines", "[", ":", "]", "=", "[", "]" ]
Handler for the event emitted when autodoc processes a docstring. See http://sphinx-doc.org/ext/autodoc.html#event-autodoc-process-docstring. The TL;DR is that we can modify ``lines`` in-place to influence the output.
[ "Handler", "for", "the", "event", "emitted", "when", "autodoc", "processes", "a", "docstring", ".", "See", "http", ":", "//", "sphinx", "-", "doc", ".", "org", "/", "ext", "/", "autodoc", ".", "html#event", "-", "autodoc", "-", "process", "-", "docstring", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/docs/conf.py#L352-L374
train
WoLpH/mailjet
mailjet/contrib/django_mailjet/forms.py
SubscriptionForm.clean_email
def clean_email(self):
    """
    Raise ValidationError if the contact exists.
    """
    contacts = self.api.lists.contacts(id=self.list_id)['result']

    for contact in contacts:
        if contact['email'] == self.cleaned_data['email']:
            raise forms.ValidationError(
                _(u'This email is already subscribed'))

    return self.cleaned_data['email']
python
def clean_email(self):
    """
    Raise ValidationError if the contact exists.
    """
    contacts = self.api.lists.contacts(id=self.list_id)['result']

    for contact in contacts:
        if contact['email'] == self.cleaned_data['email']:
            raise forms.ValidationError(
                _(u'This email is already subscribed'))

    return self.cleaned_data['email']
[ "def", "clean_email", "(", "self", ")", ":", "contacts", "=", "self", ".", "api", ".", "lists", ".", "contacts", "(", "id", "=", "self", ".", "list_id", ")", "[", "'result'", "]", "for", "contact", "in", "contacts", ":", "if", "contact", "[", "'email'", "]", "==", "self", ".", "cleaned_data", "[", "'email'", "]", ":", "raise", "forms", ".", "ValidationError", "(", "_", "(", "u'This email is already subscribed'", ")", ")", "return", "self", ".", "cleaned_data", "[", "'email'", "]" ]
Raise ValidationError if the contact exists.
[ "Raise", "ValidationError", "if", "the", "contact", "exists", "." ]
f7f5102bf52be6a4a9c62afe474387481c806e27
https://github.com/WoLpH/mailjet/blob/f7f5102bf52be6a4a9c62afe474387481c806e27/mailjet/contrib/django_mailjet/forms.py#L23-L32
train
WoLpH/mailjet
mailjet/contrib/django_mailjet/forms.py
SubscriptionForm.add_contact
def add_contact(self):
    """
    Create a contact using the email on the list.
    """
    self.api.lists.addcontact(
        contact=self.cleaned_data['email'], id=self.list_id, method='POST')
python
def add_contact(self):
    """
    Create a contact using the email on the list.
    """
    self.api.lists.addcontact(
        contact=self.cleaned_data['email'], id=self.list_id, method='POST')
[ "def", "add_contact", "(", "self", ")", ":", "self", ".", "api", ".", "lists", ".", "addcontact", "(", "contact", "=", "self", ".", "cleaned_data", "[", "'email'", "]", ",", "id", "=", "self", ".", "list_id", ",", "method", "=", "'POST'", ")" ]
Create a contact using the email on the list.
[ "Create", "a", "contact", "with", "using", "the", "email", "on", "the", "list", "." ]
f7f5102bf52be6a4a9c62afe474387481c806e27
https://github.com/WoLpH/mailjet/blob/f7f5102bf52be6a4a9c62afe474387481c806e27/mailjet/contrib/django_mailjet/forms.py#L38-L41
train
WoLpH/mailjet
mailjet/contrib/django_mailjet/forms.py
SubscriptionForm.api
def api(self):
    """
    Get or create an Api() instance using django settings.
    """
    api = getattr(self, '_api', None)

    if api is None:
        self._api = mailjet.Api()

    return self._api
python
def api(self):
    """
    Get or create an Api() instance using django settings.
    """
    api = getattr(self, '_api', None)

    if api is None:
        self._api = mailjet.Api()

    return self._api
[ "def", "api", "(", "self", ")", ":", "api", "=", "getattr", "(", "self", ",", "'_api'", ",", "None", ")", "if", "api", "is", "None", ":", "self", ".", "_api", "=", "mailjet", ".", "Api", "(", ")", "return", "self", ".", "_api" ]
Get or create an Api() instance using django settings.
[ "Get", "or", "create", "an", "Api", "()", "instance", "using", "django", "settings", "." ]
f7f5102bf52be6a4a9c62afe474387481c806e27
https://github.com/WoLpH/mailjet/blob/f7f5102bf52be6a4a9c62afe474387481c806e27/mailjet/contrib/django_mailjet/forms.py#L44-L51
train
WoLpH/mailjet
mailjet/contrib/django_mailjet/forms.py
SubscriptionForm.list_id
def list_id(self):
    """
    Get or create the list id.
    """
    list_id = getattr(self, '_list_id', None)

    if list_id is None:
        for l in self.api.lists.all()['lists']:
            if l['name'] == self.list_name:
                self._list_id = l['id']

        if not getattr(self, '_list_id', None):
            self._list_id = self.api.lists.create(
                label=self.list_label,
                name=self.list_name,
                method='POST')['list_id']

    return self._list_id
python
def list_id(self):
    """
    Get or create the list id.
    """
    list_id = getattr(self, '_list_id', None)

    if list_id is None:
        for l in self.api.lists.all()['lists']:
            if l['name'] == self.list_name:
                self._list_id = l['id']

        if not getattr(self, '_list_id', None):
            self._list_id = self.api.lists.create(
                label=self.list_label,
                name=self.list_name,
                method='POST')['list_id']

    return self._list_id
[ "def", "list_id", "(", "self", ")", ":", "list_id", "=", "getattr", "(", "self", ",", "'_list_id'", ",", "None", ")", "if", "list_id", "is", "None", ":", "for", "l", "in", "self", ".", "api", ".", "lists", ".", "all", "(", ")", "[", "'lists'", "]", ":", "if", "l", "[", "'name'", "]", "==", "self", ".", "list_name", ":", "self", ".", "_list_id", "=", "l", "[", "'id'", "]", "if", "not", "getattr", "(", "self", ",", "'_list_id'", ",", "None", ")", ":", "self", ".", "_list_id", "=", "self", ".", "api", ".", "lists", ".", "create", "(", "label", "=", "self", ".", "list_label", ",", "name", "=", "self", ".", "list_name", ",", "method", "=", "'POST'", ")", "[", "'list_id'", "]", "return", "self", ".", "_list_id" ]
Get or create the list id.
[ "Get", "or", "create", "the", "list", "id", "." ]
f7f5102bf52be6a4a9c62afe474387481c806e27
https://github.com/WoLpH/mailjet/blob/f7f5102bf52be6a4a9c62afe474387481c806e27/mailjet/contrib/django_mailjet/forms.py#L54-L68
train
Xion/callee
callee/_compat.py
getargspec
def getargspec(obj):
    """Portable version of inspect.getargspec().

    Necessary because the original is no longer available
    starting from Python 3.6.

    :return: 4-tuple of (argnames, varargname, kwargname, defaults)

    Note that distinction between positional-or-keyword and keyword-only
    parameters will be lost, as the original getargspec() doesn't honor it.
    """
    try:
        return inspect.getargspec(obj)
    except AttributeError:
        pass  # we let a TypeError through

    # translate the signature object back into the 4-tuple;
    # note: iterate the signature's parameters mapping — a Signature
    # object itself is not iterable
    argnames = []
    varargname, kwargname = None, None
    defaults = []
    for name, param in inspect.signature(obj).parameters.items():
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            varargname = name
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            kwargname = name
        else:
            argnames.append(name)
            if param.default is not inspect.Parameter.empty:
                defaults.append(param.default)

    defaults = defaults or None
    return argnames, varargname, kwargname, defaults
python
def getargspec(obj):
    """Portable version of inspect.getargspec().

    Necessary because the original is no longer available
    starting from Python 3.6.

    :return: 4-tuple of (argnames, varargname, kwargname, defaults)

    Note that distinction between positional-or-keyword and keyword-only
    parameters will be lost, as the original getargspec() doesn't honor it.
    """
    try:
        return inspect.getargspec(obj)
    except AttributeError:
        pass  # we let a TypeError through

    # translate the signature object back into the 4-tuple;
    # note: iterate the signature's parameters mapping — a Signature
    # object itself is not iterable
    argnames = []
    varargname, kwargname = None, None
    defaults = []
    for name, param in inspect.signature(obj).parameters.items():
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            varargname = name
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            kwargname = name
        else:
            argnames.append(name)
            if param.default is not inspect.Parameter.empty:
                defaults.append(param.default)

    defaults = defaults or None
    return argnames, varargname, kwargname, defaults
[ "def", "getargspec", "(", "obj", ")", ":", "try", ":", "return", "inspect", ".", "getargspec", "(", "obj", ")", "except", "AttributeError", ":", "pass", "# we let a TypeError through", "# translate the signature object back into the 4-tuple", "argnames", "=", "[", "]", "varargname", ",", "kwargname", "=", "None", ",", "None", "defaults", "=", "[", "]", "for", "name", ",", "param", "in", "inspect", ".", "signature", "(", "obj", ")", ":", "if", "param", ".", "kind", "==", "inspect", ".", "Parameter", ".", "VAR_POSITIONAL", ":", "varargname", "=", "name", "elif", "param", ".", "kind", "==", "inspect", ".", "Parameter", ".", "VAR_KEYWORD", ":", "kwargname", "=", "name", "else", ":", "argnames", ".", "append", "(", "name", ")", "if", "param", ".", "default", "is", "not", "inspect", ".", "Parameter", ".", "empty", ":", "defaults", ".", "append", "(", "param", ".", "default", ")", "defaults", "=", "defaults", "or", "None", "return", "argnames", ",", "varargname", ",", "kwargname", ",", "defaults" ]
Portable version of inspect.getargspec().

Necessary because the original is no longer available
starting from Python 3.6.

:return: 4-tuple of (argnames, varargname, kwargname, defaults)

Note that distinction between positional-or-keyword and keyword-only
parameters will be lost, as the original getargspec() doesn't honor it.
[ "Portable", "version", "of", "inspect", ".", "getargspec", "()", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/_compat.py#L81-L112
train
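A quick sanity check of the shim above, sketching the expected 4-tuple. Note that on the signature-based fallback path defaults comes back as a list, whereas the legacy getargspec() returned a tuple:

def sample(a, b=1, *args, **kwargs):
    pass

argnames, varargname, kwargname, defaults = getargspec(sample)
# expected: argnames == ['a', 'b'], varargname == 'args',
#           kwargname == 'kwargs', defaults == [1]  (or (1,) pre-3.6)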
Xion/callee
setup.py
read_tags
def read_tags(filename):
    """Reads values of "magic tags" defined in the given Python file.

    :param filename: Python filename to read the tags from
    :return: Dictionary of tags
    """
    with open(filename) as f:
        ast_tree = ast.parse(f.read(), filename)

    res = {}
    for node in ast.walk(ast_tree):
        if type(node) is not ast.Assign:
            continue

        target = node.targets[0]
        if type(target) is not ast.Name:
            continue
        if not (target.id.startswith('__') and target.id.endswith('__')):
            continue

        name = target.id[2:-2]
        res[name] = ast.literal_eval(node.value)

    return res
python
def read_tags(filename):
    """Reads values of "magic tags" defined in the given Python file.

    :param filename: Python filename to read the tags from
    :return: Dictionary of tags
    """
    with open(filename) as f:
        ast_tree = ast.parse(f.read(), filename)

    res = {}
    for node in ast.walk(ast_tree):
        if type(node) is not ast.Assign:
            continue

        target = node.targets[0]
        if type(target) is not ast.Name:
            continue
        if not (target.id.startswith('__') and target.id.endswith('__')):
            continue

        name = target.id[2:-2]
        res[name] = ast.literal_eval(node.value)

    return res
[ "def", "read_tags", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "ast_tree", "=", "ast", ".", "parse", "(", "f", ".", "read", "(", ")", ",", "filename", ")", "res", "=", "{", "}", "for", "node", "in", "ast", ".", "walk", "(", "ast_tree", ")", ":", "if", "type", "(", "node", ")", "is", "not", "ast", ".", "Assign", ":", "continue", "target", "=", "node", ".", "targets", "[", "0", "]", "if", "type", "(", "target", ")", "is", "not", "ast", ".", "Name", ":", "continue", "if", "not", "(", "target", ".", "id", ".", "startswith", "(", "'__'", ")", "and", "target", ".", "id", ".", "endswith", "(", "'__'", ")", ")", ":", "continue", "name", "=", "target", ".", "id", "[", "2", ":", "-", "2", "]", "res", "[", "name", "]", "=", "ast", ".", "literal_eval", "(", "node", ".", "value", ")", "return", "res" ]
Reads values of "magic tags" defined in the given Python file.

:param filename: Python filename to read the tags from
:return: Dictionary of tags
[ "Reads", "values", "of", "magic", "tags", "defined", "in", "the", "given", "Python", "file", "." ]
58740f73ff9a76f5fe0075bf18d7345a0f9d961c
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/setup.py#L16-L39
train
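A short, hedged usage sketch for read_tags(); the module file and its dunder assignments below are hypothetical:

with open('/tmp/fake_module.py', 'w') as f:   # hypothetical throwaway file
    f.write("__version__ = '0.1'\n__author__ = 'Jane Doe'\n")

tags = read_tags('/tmp/fake_module.py')
assert tags == {'version': '0.1', 'author': 'Jane Doe'}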
MichaelAquilina/hashedindex
hashedindex/textparser.py
normalize_unicode
def normalize_unicode(text):
    """
    Normalize any unicode characters to ascii equivalent
    https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
    """
    if isinstance(text, six.text_type):
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')
    else:
        return text
python
def normalize_unicode(text):
    """
    Normalize any unicode characters to ascii equivalent
    https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
    """
    if isinstance(text, six.text_type):
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')
    else:
        return text
[ "def", "normalize_unicode", "(", "text", ")", ":", "if", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "return", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "text", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "'utf8'", ")", "else", ":", "return", "text" ]
Normalize any unicode characters to ascii equivalent https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
[ "Normalize", "any", "unicode", "characters", "to", "ascii", "equivalent", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "2", "/", "library", "/", "unicodedata", ".", "html#unicodedata", ".", "normalize" ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/textparser.py#L51-L59
train
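Two illustrative calls for normalize_unicode(): accented text is folded to its ASCII skeleton, and non-text input is passed through untouched:

assert normalize_unicode(u'caf\xe9 na\xefve') == 'cafe naive'
assert normalize_unicode(42) == 42   # not a text type, returned unchanged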
MichaelAquilina/hashedindex
hashedindex/textparser.py
word_tokenize
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
    """
    Parses the given text and yields tokens which represent words within
    the given text. Tokens are assumed to be divided by any form of
    whitespace character.
    """
    if ngrams is None:
        ngrams = 1

    text = re.sub(re.compile('\'s'), '', text)  # Simple heuristic
    text = re.sub(_re_punctuation, '', text)

    matched_tokens = re.findall(_re_token, text.lower())
    for tokens in get_ngrams(matched_tokens, ngrams):
        for i in range(len(tokens)):
            tokens[i] = tokens[i].strip(punctuation)

            if len(tokens[i]) < min_length or tokens[i] in stopwords:
                break
            if ignore_numeric and isnumeric(tokens[i]):
                break
        else:
            yield tuple(tokens)
python
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
    """
    Parses the given text and yields tokens which represent words within
    the given text. Tokens are assumed to be divided by any form of
    whitespace character.
    """
    if ngrams is None:
        ngrams = 1

    text = re.sub(re.compile('\'s'), '', text)  # Simple heuristic
    text = re.sub(_re_punctuation, '', text)

    matched_tokens = re.findall(_re_token, text.lower())
    for tokens in get_ngrams(matched_tokens, ngrams):
        for i in range(len(tokens)):
            tokens[i] = tokens[i].strip(punctuation)

            if len(tokens[i]) < min_length or tokens[i] in stopwords:
                break
            if ignore_numeric and isnumeric(tokens[i]):
                break
        else:
            yield tuple(tokens)
[ "def", "word_tokenize", "(", "text", ",", "stopwords", "=", "_stopwords", ",", "ngrams", "=", "None", ",", "min_length", "=", "0", ",", "ignore_numeric", "=", "True", ")", ":", "if", "ngrams", "is", "None", ":", "ngrams", "=", "1", "text", "=", "re", ".", "sub", "(", "re", ".", "compile", "(", "'\\'s'", ")", ",", "''", ",", "text", ")", "# Simple heuristic", "text", "=", "re", ".", "sub", "(", "_re_punctuation", ",", "''", ",", "text", ")", "matched_tokens", "=", "re", ".", "findall", "(", "_re_token", ",", "text", ".", "lower", "(", ")", ")", "for", "tokens", "in", "get_ngrams", "(", "matched_tokens", ",", "ngrams", ")", ":", "for", "i", "in", "range", "(", "len", "(", "tokens", ")", ")", ":", "tokens", "[", "i", "]", "=", "tokens", "[", "i", "]", ".", "strip", "(", "punctuation", ")", "if", "len", "(", "tokens", "[", "i", "]", ")", "<", "min_length", "or", "tokens", "[", "i", "]", "in", "stopwords", ":", "break", "if", "ignore_numeric", "and", "isnumeric", "(", "tokens", "[", "i", "]", ")", ":", "break", "else", ":", "yield", "tuple", "(", "tokens", ")" ]
Parses the given text and yields tokens which represent words within the given text. Tokens are assumed to be divided by any form of whitespace character.
[ "Parses", "the", "given", "text", "and", "yields", "tokens", "which", "represent", "words", "within", "the", "given", "text", ".", "Tokens", "are", "assumed", "to", "be", "divided", "by", "any", "form", "of", "whitespace", "character", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/textparser.py#L67-L89
train
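A hedged sketch of word_tokenize() in use; the exact output depends on the module's _stopwords set, which this record does not show:

list(word_tokenize('The cat sat on the mat'))
# plausibly [('cat',), ('sat',), ('mat',)] if 'the' and 'on' are stopwords

list(word_tokenize('machine learning models', ngrams=2))
# [('machine', 'learning'), ('learning', 'models')]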
scivision/msise00
msise00/build.py
cmake_setup
def cmake_setup():
    """
    attempt to build using CMake >= 3
    """
    cmake_exe = shutil.which('cmake')
    if not cmake_exe:
        raise FileNotFoundError('CMake not available')

    wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []

    subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)],
                          cwd=BINDIR)

    ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
python
def cmake_setup():
    """
    attempt to build using CMake >= 3
    """
    cmake_exe = shutil.which('cmake')
    if not cmake_exe:
        raise FileNotFoundError('CMake not available')

    wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []

    subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)],
                          cwd=BINDIR)

    ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
[ "def", "cmake_setup", "(", ")", ":", "cmake_exe", "=", "shutil", ".", "which", "(", "'cmake'", ")", "if", "not", "cmake_exe", ":", "raise", "FileNotFoundError", "(", "'CMake not available'", ")", "wopts", "=", "[", "'-G'", ",", "'MinGW Makefiles'", ",", "'-DCMAKE_SH=\"CMAKE_SH-NOTFOUND'", "]", "if", "os", ".", "name", "==", "'nt'", "else", "[", "]", "subprocess", ".", "check_call", "(", "[", "cmake_exe", "]", "+", "wopts", "+", "[", "str", "(", "SRCDIR", ")", "]", ",", "cwd", "=", "BINDIR", ")", "ret", "=", "subprocess", ".", "run", "(", "[", "cmake_exe", ",", "'--build'", ",", "str", "(", "BINDIR", ")", "]", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "universal_newlines", "=", "True", ")", "result", "(", "ret", ")" ]
attempt to build using CMake >= 3
[ "attempt", "to", "build", "using", "CMake", ">", "=", "3" ]
13a283ec02679ab74672f284ba68a7a8f896dc6f
https://github.com/scivision/msise00/blob/13a283ec02679ab74672f284ba68a7a8f896dc6f/msise00/build.py#L27-L44
train
scivision/msise00
msise00/build.py
meson_setup
def meson_setup():
    """
    attempt to build with Meson + Ninja
    """
    meson_exe = shutil.which('meson')
    ninja_exe = shutil.which('ninja')

    if not meson_exe or not ninja_exe:
        raise FileNotFoundError('Meson or Ninja not available')

    if not (BINDIR / 'build.ninja').is_file():
        subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR)

    ret = subprocess.run(ninja_exe, cwd=BINDIR,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
python
def meson_setup():
    """
    attempt to build with Meson + Ninja
    """
    meson_exe = shutil.which('meson')
    ninja_exe = shutil.which('ninja')

    if not meson_exe or not ninja_exe:
        raise FileNotFoundError('Meson or Ninja not available')

    if not (BINDIR / 'build.ninja').is_file():
        subprocess.check_call([meson_exe, str(SRCDIR)], cwd=BINDIR)

    ret = subprocess.run(ninja_exe, cwd=BINDIR,
                         stderr=subprocess.PIPE,
                         universal_newlines=True)

    result(ret)
[ "def", "meson_setup", "(", ")", ":", "meson_exe", "=", "shutil", ".", "which", "(", "'meson'", ")", "ninja_exe", "=", "shutil", ".", "which", "(", "'ninja'", ")", "if", "not", "meson_exe", "or", "not", "ninja_exe", ":", "raise", "FileNotFoundError", "(", "'Meson or Ninja not available'", ")", "if", "not", "(", "BINDIR", "/", "'build.ninja'", ")", ".", "is_file", "(", ")", ":", "subprocess", ".", "check_call", "(", "[", "meson_exe", ",", "str", "(", "SRCDIR", ")", "]", ",", "cwd", "=", "BINDIR", ")", "ret", "=", "subprocess", ".", "run", "(", "ninja_exe", ",", "cwd", "=", "BINDIR", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "universal_newlines", "=", "True", ")", "result", "(", "ret", ")" ]
attempt to build with Meson + Ninja
[ "attempt", "to", "build", "with", "Meson", "+", "Ninja" ]
13a283ec02679ab74672f284ba68a7a8f896dc6f
https://github.com/scivision/msise00/blob/13a283ec02679ab74672f284ba68a7a8f896dc6f/msise00/build.py#L47-L63
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.add_term_occurrence
def add_term_occurrence(self, term, document):
    """
    Adds an occurrence of the term in the specified document.
    """
    if document not in self._documents:
        self._documents[document] = 0

    if term not in self._terms:
        if self._freeze:
            return
        else:
            self._terms[term] = collections.Counter()

    if document not in self._terms[term]:
        self._terms[term][document] = 0

    self._documents[document] += 1
    self._terms[term][document] += 1
python
def add_term_occurrence(self, term, document):
    """
    Adds an occurrence of the term in the specified document.
    """
    if document not in self._documents:
        self._documents[document] = 0

    if term not in self._terms:
        if self._freeze:
            return
        else:
            self._terms[term] = collections.Counter()

    if document not in self._terms[term]:
        self._terms[term][document] = 0

    self._documents[document] += 1
    self._terms[term][document] += 1
[ "def", "add_term_occurrence", "(", "self", ",", "term", ",", "document", ")", ":", "if", "document", "not", "in", "self", ".", "_documents", ":", "self", ".", "_documents", "[", "document", "]", "=", "0", "if", "term", "not", "in", "self", ".", "_terms", ":", "if", "self", ".", "_freeze", ":", "return", "else", ":", "self", ".", "_terms", "[", "term", "]", "=", "collections", ".", "Counter", "(", ")", "if", "document", "not", "in", "self", ".", "_terms", "[", "term", "]", ":", "self", ".", "_terms", "[", "term", "]", "[", "document", "]", "=", "0", "self", ".", "_documents", "[", "document", "]", "+=", "1", "self", ".", "_terms", "[", "term", "]", "[", "document", "]", "+=", "1" ]
Adds an occurrence of the term in the specified document.
[ "Adds", "an", "occurrence", "of", "the", "term", "in", "the", "specified", "document", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L69-L86
train
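A small sketch tying the two hashedindex pieces together — assuming HashedIndex() can be constructed with no arguments and that word_tokenize comes from the textparser module shown earlier:

index = HashedIndex()                          # assumed default constructor
for term in word_tokenize('the quick brown fox jumps'):
    index.add_term_occurrence(term, 'doc1.txt')

index.get_document_length('doc1.txt')          # occurrences recorded for doc1.txt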
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_total_term_frequency
def get_total_term_frequency(self, term):
    """
    Gets the frequency of the specified term in the entire corpus
    added to the HashedIndex.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    return sum(self._terms[term].values())
python
def get_total_term_frequency(self, term):
    """
    Gets the frequency of the specified term in the entire corpus
    added to the HashedIndex.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    return sum(self._terms[term].values())
[ "def", "get_total_term_frequency", "(", "self", ",", "term", ")", ":", "if", "term", "not", "in", "self", ".", "_terms", ":", "raise", "IndexError", "(", "TERM_DOES_NOT_EXIST", ")", "return", "sum", "(", "self", ".", "_terms", "[", "term", "]", ".", "values", "(", ")", ")" ]
Gets the frequency of the specified term in the entire corpus added to the HashedIndex.
[ "Gets", "the", "frequency", "of", "the", "specified", "term", "in", "the", "entire", "corpus", "added", "to", "the", "HashedIndex", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L88-L96
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_term_frequency
def get_term_frequency(self, term, document, normalized=False):
    """
    Returns the frequency of the term specified in the document.
    """
    if document not in self._documents:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)

    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    result = self._terms[term].get(document, 0)

    if normalized:
        result /= self.get_document_length(document)

    return float(result)
python
def get_term_frequency(self, term, document, normalized=False):
    """
    Returns the frequency of the term specified in the document.
    """
    if document not in self._documents:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)

    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)

    result = self._terms[term].get(document, 0)

    if normalized:
        result /= self.get_document_length(document)

    return float(result)
[ "def", "get_term_frequency", "(", "self", ",", "term", ",", "document", ",", "normalized", "=", "False", ")", ":", "if", "document", "not", "in", "self", ".", "_documents", ":", "raise", "IndexError", "(", "DOCUMENT_DOES_NOT_EXIST", ")", "if", "term", "not", "in", "self", ".", "_terms", ":", "raise", "IndexError", "(", "TERM_DOES_NOT_EXIST", ")", "result", "=", "self", ".", "_terms", "[", "term", "]", ".", "get", "(", "document", ",", "0", ")", "if", "normalized", ":", "result", "/=", "self", ".", "get_document_length", "(", "document", ")", "return", "float", "(", "result", ")" ]
Returns the frequency of the term specified in the document.
[ "Returns", "the", "frequency", "of", "the", "term", "specified", "in", "the", "document", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L98-L112
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_document_frequency
def get_document_frequency(self, term):
    """
    Returns the number of documents the specified term appears in.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return len(self._terms[term])
python
def get_document_frequency(self, term):
    """
    Returns the number of documents the specified term appears in.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return len(self._terms[term])
[ "def", "get_document_frequency", "(", "self", ",", "term", ")", ":", "if", "term", "not", "in", "self", ".", "_terms", ":", "raise", "IndexError", "(", "TERM_DOES_NOT_EXIST", ")", "else", ":", "return", "len", "(", "self", ".", "_terms", "[", "term", "]", ")" ]
Returns the number of documents the specified term appears in.
[ "Returns", "the", "number", "of", "documents", "the", "specified", "term", "appears", "in", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L114-L121
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_document_length
def get_document_length(self, document):
    """
    Returns the number of terms found within the specified document.
    """
    if document in self._documents:
        return self._documents[document]
    else:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
python
def get_document_length(self, document):
    """
    Returns the number of terms found within the specified document.
    """
    if document in self._documents:
        return self._documents[document]
    else:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
[ "def", "get_document_length", "(", "self", ",", "document", ")", ":", "if", "document", "in", "self", ".", "_documents", ":", "return", "self", ".", "_documents", "[", "document", "]", "else", ":", "raise", "IndexError", "(", "DOCUMENT_DOES_NOT_EXIST", ")" ]
Returns the number of terms found within the specified document.
[ "Returns", "the", "number", "of", "terms", "found", "within", "the", "specified", "document", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L123-L130
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_documents
def get_documents(self, term):
    """
    Returns all documents related to the specified term in the form
    of a Counter object.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return self._terms[term]
python
def get_documents(self, term):
    """
    Returns all documents related to the specified term in the form
    of a Counter object.
    """
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    else:
        return self._terms[term]
[ "def", "get_documents", "(", "self", ",", "term", ")", ":", "if", "term", "not", "in", "self", ".", "_terms", ":", "raise", "IndexError", "(", "TERM_DOES_NOT_EXIST", ")", "else", ":", "return", "self", ".", "_terms", "[", "term", "]" ]
Returns all documents related to the specified term in the form of a Counter object.
[ "Returns", "all", "documents", "related", "to", "the", "specified", "term", "in", "the", "form", "of", "a", "Counter", "object", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L132-L140
train
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.get_tfidf
def get_tfidf(self, term, document, normalized=False):
    """
    Returns the Term-Frequency Inverse-Document-Frequency value for the
    given term in the specified document. If normalized is True, term
    frequency will be divided by the document length.
    """
    tf = self.get_term_frequency(term, document)

    # Speeds up performance by avoiding extra calculations
    if tf != 0.0:
        # Add 1 to document frequency to prevent divide by 0
        # (Laplacian Correction)
        df = 1 + self.get_document_frequency(term)
        n = 2 + len(self._documents)

        if normalized:
            tf /= self.get_document_length(document)

        return tf * math.log10(n / df)
    else:
        return 0.0
python
def get_tfidf(self, term, document, normalized=False):
    """
    Returns the Term-Frequency Inverse-Document-Frequency value for the
    given term in the specified document. If normalized is True, term
    frequency will be divided by the document length.
    """
    tf = self.get_term_frequency(term, document)

    # Speeds up performance by avoiding extra calculations
    if tf != 0.0:
        # Add 1 to document frequency to prevent divide by 0
        # (Laplacian Correction)
        df = 1 + self.get_document_frequency(term)
        n = 2 + len(self._documents)

        if normalized:
            tf /= self.get_document_length(document)

        return tf * math.log10(n / df)
    else:
        return 0.0
[ "def", "get_tfidf", "(", "self", ",", "term", ",", "document", ",", "normalized", "=", "False", ")", ":", "tf", "=", "self", ".", "get_term_frequency", "(", "term", ",", "document", ")", "# Speeds up performance by avoiding extra calculations", "if", "tf", "!=", "0.0", ":", "# Add 1 to document frequency to prevent divide by 0", "# (Laplacian Correction)", "df", "=", "1", "+", "self", ".", "get_document_frequency", "(", "term", ")", "n", "=", "2", "+", "len", "(", "self", ".", "_documents", ")", "if", "normalized", ":", "tf", "/=", "self", ".", "get_document_length", "(", "document", ")", "return", "tf", "*", "math", ".", "log10", "(", "n", "/", "df", ")", "else", ":", "return", "0.0" ]
Returns the Term-Frequency Inverse-Document-Frequency value for the given term in the specified document. If normalized is True, term frequency will be divided by the document length.
[ "Returns", "the", "Term", "-", "Frequency", "Inverse", "-", "Document", "-", "Frequency", "value", "for", "the", "given", "term", "in", "the", "specified", "document", ".", "If", "normalized", "is", "True", "term", "frequency", "will", "be", "divided", "by", "the", "document", "length", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L151-L171
train
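Reading the body above, the unnormalized weight works out to tf * log10((N + 2) / (df + 1)), where N is the corpus size and df the term's document frequency; the +1/+2 are the Laplacian correction. A worked sketch with made-up counts:

import math

tf, df, n_docs = 3.0, 4, 98                      # hypothetical counts
weight = tf * math.log10((n_docs + 2) / (df + 1))
# 3 * log10(100 / 5) = 3 * 1.30103... ≈ 3.903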
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.generate_document_vector
def generate_document_vector(self, doc, mode='tfidf'):
    """
    Returns a representation of the specified document as a feature vector
    weighted according to the mode specified (by default tf-idf). A custom
    weighting function can also be passed which receives the hashedindex
    instance, the selected term and document as parameters.

    The result will be returned in the form of a list. This can be
    converted into a numpy array if required using the `np.asarray` method

    Available built-in modes:
    * tfidf: Term Frequency Inverse Document Frequency
    * ntfidf: Normalized Term Frequency Inverse Document Frequency
    * tf: Term Frequency
    * ntf: Normalized Term Frequency
    """
    if mode == 'tfidf':
        selected_function = HashedIndex.get_tfidf
    elif mode == 'ntfidf':
        selected_function = functools.partial(HashedIndex.get_tfidf, normalized=True)
    elif mode == 'tf':
        selected_function = HashedIndex.get_term_frequency
    elif mode == 'ntf':
        selected_function = functools.partial(HashedIndex.get_term_frequency, normalized=True)
    elif hasattr(mode, '__call__'):
        selected_function = mode
    else:
        raise ValueError('Unexpected mode: %s', mode)

    result = []
    for term in self._terms:
        result.append(selected_function(self, term, doc))

    return result
python
def generate_document_vector(self, doc, mode='tfidf'):
    """
    Returns a representation of the specified document as a feature vector
    weighted according to the mode specified (by default tf-idf). A custom
    weighting function can also be passed which receives the hashedindex
    instance, the selected term and document as parameters.

    The result will be returned in the form of a list. This can be
    converted into a numpy array if required using the `np.asarray` method

    Available built-in modes:
    * tfidf: Term Frequency Inverse Document Frequency
    * ntfidf: Normalized Term Frequency Inverse Document Frequency
    * tf: Term Frequency
    * ntf: Normalized Term Frequency
    """
    if mode == 'tfidf':
        selected_function = HashedIndex.get_tfidf
    elif mode == 'ntfidf':
        selected_function = functools.partial(HashedIndex.get_tfidf, normalized=True)
    elif mode == 'tf':
        selected_function = HashedIndex.get_term_frequency
    elif mode == 'ntf':
        selected_function = functools.partial(HashedIndex.get_term_frequency, normalized=True)
    elif hasattr(mode, '__call__'):
        selected_function = mode
    else:
        raise ValueError('Unexpected mode: %s', mode)

    result = []
    for term in self._terms:
        result.append(selected_function(self, term, doc))

    return result
[ "def", "generate_document_vector", "(", "self", ",", "doc", ",", "mode", "=", "'tfidf'", ")", ":", "if", "mode", "==", "'tfidf'", ":", "selected_function", "=", "HashedIndex", ".", "get_tfidf", "elif", "mode", "==", "'ntfidf'", ":", "selected_function", "=", "functools", ".", "partial", "(", "HashedIndex", ".", "get_tfidf", ",", "normalized", "=", "True", ")", "elif", "mode", "==", "'tf'", ":", "selected_function", "=", "HashedIndex", ".", "get_term_frequency", "elif", "mode", "==", "'ntf'", ":", "selected_function", "=", "functools", ".", "partial", "(", "HashedIndex", ".", "get_term_frequency", ",", "normalized", "=", "True", ")", "elif", "hasattr", "(", "mode", ",", "'__call__'", ")", ":", "selected_function", "=", "mode", "else", ":", "raise", "ValueError", "(", "'Unexpected mode: %s'", ",", "mode", ")", "result", "=", "[", "]", "for", "term", "in", "self", ".", "_terms", ":", "result", ".", "append", "(", "selected_function", "(", "self", ",", "term", ",", "doc", ")", ")", "return", "result" ]
Returns a representation of the specified document as a feature vector
weighted according to the mode specified (by default tf-idf). A custom
weighting function can also be passed which receives the hashedindex
instance, the selected term and document as parameters.

The result will be returned in the form of a list. This can be
converted into a numpy array if required using the `np.asarray` method

Available built-in modes:
* tfidf: Term Frequency Inverse Document Frequency
* ntfidf: Normalized Term Frequency Inverse Document Frequency
* tf: Term Frequency
* ntf: Normalized Term Frequency
[ "Returns", "a", "representation", "of", "the", "specified", "document", "as", "a", "feature", "vector", "weighted", "according", "the", "mode", "specified", "(", "by", "default", "tf", "-", "dif", ")", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L179-L212
train
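The custom-callable branch above accepts any function with the (index, term, document) shape; a minimal sketch of a binary-presence weighting, given a populated index instance like the one built earlier:

def binary_weight(index, term, document):
    # 1.0 when the term occurs in the document at all, else 0.0
    return float(index.get_term_frequency(term, document) > 0)

vector = index.generate_document_vector('doc1.txt', mode=binary_weight)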
MichaelAquilina/hashedindex
hashedindex/__init__.py
HashedIndex.generate_feature_matrix
def generate_feature_matrix(self, mode='tfidf'):
    """
    Returns a feature matrix in the form of a list of lists which
    represents the terms and documents in this Inverted Index using
    the tf-idf weighting by default. The term counts in each document
    can alternatively be used by specifying scheme='count'. A custom
    weighting function can also be passed which receives a term and
    document as parameters.

    The size of the matrix is equal to m x n where m is
    the number of documents and n is the number of terms.

    The list-of-lists format returned by this function can be very easily
    converted to a numpy matrix if required using the `np.as_matrix` method.
    """
    result = []
    for doc in self._documents:
        result.append(self.generate_document_vector(doc, mode))

    return result
python
def generate_feature_matrix(self, mode='tfidf'):
    """
    Returns a feature matrix in the form of a list of lists which
    represents the terms and documents in this Inverted Index using
    the tf-idf weighting by default. The term counts in each document
    can alternatively be used by specifying scheme='count'. A custom
    weighting function can also be passed which receives a term and
    document as parameters.

    The size of the matrix is equal to m x n where m is
    the number of documents and n is the number of terms.

    The list-of-lists format returned by this function can be very easily
    converted to a numpy matrix if required using the `np.as_matrix` method.
    """
    result = []
    for doc in self._documents:
        result.append(self.generate_document_vector(doc, mode))

    return result
[ "def", "generate_feature_matrix", "(", "self", ",", "mode", "=", "'tfidf'", ")", ":", "result", "=", "[", "]", "for", "doc", "in", "self", ".", "_documents", ":", "result", ".", "append", "(", "self", ".", "generate_document_vector", "(", "doc", ",", "mode", ")", ")", "return", "result" ]
Returns a feature matrix in the form of a list of lists which
represents the terms and documents in this Inverted Index using
the tf-idf weighting by default. The term counts in each document
can alternatively be used by specifying scheme='count'. A custom
weighting function can also be passed which receives a term and
document as parameters.

The size of the matrix is equal to m x n where m is
the number of documents and n is the number of terms.

The list-of-lists format returned by this function can be very easily
converted to a numpy matrix if required using the `np.as_matrix` method.
[ "Returns", "a", "feature", "matrix", "in", "the", "form", "of", "a", "list", "of", "lists", "which", "represents", "the", "terms", "and", "documents", "in", "this", "Inverted", "Index", "using", "the", "tf", "-", "idf", "weighting", "by", "default", ".", "The", "term", "counts", "in", "each", "document", "can", "alternatively", "be", "used", "by", "specifying", "scheme", "=", "count", "." ]
5a84dcd6c697ea04162cf7b2683fa2723845b51c
https://github.com/MichaelAquilina/hashedindex/blob/5a84dcd6c697ea04162cf7b2683fa2723845b51c/hashedindex/__init__.py#L214-L236
train
bkeating/python-payflowpro
payflowpro/client.py
find_class_in_list
def find_class_in_list(klass, lst):
    """
    Returns the first occurrence of an instance of type `klass` in
    the given list, or None if no such instance is present.
    """
    filtered = list(filter(lambda x: x.__class__ == klass, lst))
    if filtered:
        return filtered[0]
    return None
python
def find_class_in_list(klass, lst):
    """
    Returns the first occurrence of an instance of type `klass` in
    the given list, or None if no such instance is present.
    """
    filtered = list(filter(lambda x: x.__class__ == klass, lst))
    if filtered:
        return filtered[0]
    return None
[ "def", "find_class_in_list", "(", "klass", ",", "lst", ")", ":", "filtered", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", ".", "__class__", "==", "klass", ",", "lst", ")", ")", "if", "filtered", ":", "return", "filtered", "[", "0", "]", "return", "None" ]
Returns the first occurrence of an instance of type `klass` in the given list, or None if no such instance is present.
[ "Returns", "the", "first", "occurrence", "of", "an", "instance", "of", "type", "klass", "in", "the", "given", "list", "or", "None", "if", "no", "such", "instance", "is", "present", "." ]
e74fc85135f171caa28277196fdcf7c7481ff298
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/client.py#L400-L408
train
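Note that the helper above compares x.__class__ == klass, so subclasses do not match; a quick illustration:

class Base(object): pass
class Child(Base): pass

items = [Child(), Base(), Base()]
first = find_class_in_list(Base, items)
assert first is items[1]               # Child() is skipped: exact class only
assert find_class_in_list(int, items) is None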
bkeating/python-payflowpro
payflowpro/client.py
find_classes_in_list
def find_classes_in_list(klasses, lst):
    """
    Returns a tuple containing an entry corresponding to each of the
    requested class types, where the entry is either the first object
    instance of that type or None if no such instances are available.

    Example Usage:
        find_classes_in_list(
            [Address, Response],
            [<classes.Response...>, <classes.Amount...>, <classes.Address...>])
    Produces:
        (<classes.Address...>, <classes.Response...>)
    """
    if not isinstance(klasses, list):
        klasses = [klasses]
    return tuple(map(lambda klass: find_class_in_list(klass, lst), klasses))
python
def find_classes_in_list(klasses, lst):
    """
    Returns a tuple containing an entry corresponding to each of the
    requested class types, where the entry is either the first object
    instance of that type or None if no such instances are available.

    Example Usage:
        find_classes_in_list(
            [Address, Response],
            [<classes.Response...>, <classes.Amount...>, <classes.Address...>])
    Produces:
        (<classes.Address...>, <classes.Response...>)
    """
    if not isinstance(klasses, list):
        klasses = [klasses]
    return tuple(map(lambda klass: find_class_in_list(klass, lst), klasses))
[ "def", "find_classes_in_list", "(", "klasses", ",", "lst", ")", ":", "if", "not", "isinstance", "(", "klasses", ",", "list", ")", ":", "klasses", "=", "[", "klasses", "]", "return", "tuple", "(", "map", "(", "lambda", "klass", ":", "find_class_in_list", "(", "klass", ",", "lst", ")", ",", "klasses", ")", ")" ]
Returns a tuple containing an entry corresponding to each of the
requested class types, where the entry is either the first object
instance of that type or None if no such instances are available.

Example Usage:
    find_classes_in_list(
        [Address, Response],
        [<classes.Response...>, <classes.Amount...>, <classes.Address...>])
Produces:
    (<classes.Address...>, <classes.Response...>)
[ "Returns", "a", "tuple", "containing", "an", "entry", "corresponding", "to", "each", "of", "the", "requested", "class", "types", "where", "the", "entry", "is", "either", "the", "first", "object", "instance", "of", "that", "type", "or", "None", "of", "no", "such", "instances", "are", "available", ".", "Example", "Usage", ":", "find_classes_in_list", "(", "[", "Address", "Response", "]", "[", "<classes", ".", "Response", "...", ">", "<classes", ".", "Amount", "...", ">", "<classes", ".", "Address", "...", ">", "]", ")", "Produces", ":", "(", "<classes", ".", "Address", "...", ">", "<classes", ".", "Response", "...", ">", ")" ]
e74fc85135f171caa28277196fdcf7c7481ff298
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/client.py#L410-L427
train
bkeating/python-payflowpro
payflowpro/client.py
PayflowProClient._build_parmlist
def _build_parmlist(self, parameters):
    """
    Converts a dictionary of name and value pairs into a PARMLIST string
    value acceptable to the Payflow Pro API.
    """
    args = []
    for key, value in parameters.items():
        if value is not None:
            # We always use the explicit-length keyname format, to reduce
            # the chance of requests failing due to unusual characters in
            # parameter values.
            try:
                classinfo = unicode
            except NameError:
                classinfo = str
            if isinstance(value, classinfo):
                key = '%s[%d]' % (key.upper(), len(value.encode('utf-8')))
            else:
                key = '%s[%d]' % (key.upper(), len(str(value)))
            args.append('%s=%s' % (key, value))

    args.sort()
    parmlist = '&'.join(args)
    return parmlist
python
def _build_parmlist(self, parameters):
    """
    Converts a dictionary of name and value pairs into a PARMLIST string
    value acceptable to the Payflow Pro API.
    """
    args = []
    for key, value in parameters.items():
        if value is not None:
            # We always use the explicit-length keyname format, to reduce
            # the chance of requests failing due to unusual characters in
            # parameter values.
            try:
                classinfo = unicode
            except NameError:
                classinfo = str
            if isinstance(value, classinfo):
                key = '%s[%d]' % (key.upper(), len(value.encode('utf-8')))
            else:
                key = '%s[%d]' % (key.upper(), len(str(value)))
            args.append('%s=%s' % (key, value))

    args.sort()
    parmlist = '&'.join(args)
    return parmlist
[ "def", "_build_parmlist", "(", "self", ",", "parameters", ")", ":", "args", "=", "[", "]", "for", "key", ",", "value", "in", "parameters", ".", "items", "(", ")", ":", "if", "not", "value", "is", "None", ":", "# We always use the explicit-length keyname format, to reduce the chance", "# of requests failing due to unusual characters in parameter values.", "try", ":", "classinfo", "=", "unicode", "except", "NameError", ":", "classinfo", "=", "str", "if", "isinstance", "(", "value", ",", "classinfo", ")", ":", "key", "=", "'%s[%d]'", "%", "(", "key", ".", "upper", "(", ")", ",", "len", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")", ")", "else", ":", "key", "=", "'%s[%d]'", "%", "(", "key", ".", "upper", "(", ")", ",", "len", "(", "str", "(", "value", ")", ")", ")", "args", ".", "append", "(", "'%s=%s'", "%", "(", "key", ",", "value", ")", ")", "args", ".", "sort", "(", ")", "parmlist", "=", "'&'", ".", "join", "(", "args", ")", "return", "parmlist" ]
Converts a dictionary of name and value pairs into a PARMLIST string value acceptable to the Payflow Pro API.
[ "Converts", "a", "dictionary", "of", "name", "and", "value", "pairs", "into", "a", "PARMLIST", "string", "value", "acceptable", "to", "the", "Payflow", "Pro", "API", "." ]
e74fc85135f171caa28277196fdcf7c7481ff298
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/client.py#L106-L131
train
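A sketch of the explicit-length PARMLIST format the method above emits; client is an assumed, already-constructed PayflowProClient instance (construction details omitted):

client._build_parmlist({'user': 'test', 'amt': 10.0})
# -> 'AMT[4]=10.0&USER[4]=test'  (keys upper-cased, byte lengths, sorted)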
bkeating/python-payflowpro
payflowpro/client.py
PayflowProClient._parse_parmlist
def _parse_parmlist(self, parmlist):
    """
    Parses a PARMLIST string into a dictionary of name and value pairs.

    The parsing is complicated by the following:
      - parameter keynames may or may not include a length specification
      - delimiter characters (=, &) may appear inside parameter values,
        provided the parameter has an explicit length.

    For example, the following parmlist values are possible:
      A=B&C=D
      A[1]=B&C[1]=D
      A=B&C[1]=D
      A[3]=B&B&C[1]=D     (Here, the value of A is "B&B")
      A[1]=B&C[3]=D=7     (Here, the value of C is "D=7")
    """
    parmlist = "&" + parmlist

    name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=')

    results = {}
    offset = 0
    match = name_re.search(parmlist, offset)
    while match:
        name, len_suffix = match.groups()
        offset = match.end()
        if len_suffix:
            val_len = int(len_suffix[1:-1])
        else:
            next_match = name_re.search(parmlist, offset)
            if next_match:
                val_len = next_match.start() - match.end()
            else:
                # At end of parmlist
                val_len = len(parmlist) - match.end()

        value = parmlist[match.end() : match.end() + val_len]
        results[name.lower()] = value

        match = name_re.search(parmlist, offset)
    return results
python
def _parse_parmlist(self, parmlist):
    """
    Parses a PARMLIST string into a dictionary of name and value pairs.

    The parsing is complicated by the following:
      - parameter keynames may or may not include a length specification
      - delimiter characters (=, &) may appear inside parameter values,
        provided the parameter has an explicit length.

    For example, the following parmlist values are possible:
      A=B&C=D
      A[1]=B&C[1]=D
      A=B&C[1]=D
      A[3]=B&B&C[1]=D     (Here, the value of A is "B&B")
      A[1]=B&C[3]=D=7     (Here, the value of C is "D=7")
    """
    parmlist = "&" + parmlist

    name_re = re.compile(r'\&([A-Z0-9_]+)(\[\d+\])?=')

    results = {}
    offset = 0
    match = name_re.search(parmlist, offset)
    while match:
        name, len_suffix = match.groups()
        offset = match.end()
        if len_suffix:
            val_len = int(len_suffix[1:-1])
        else:
            next_match = name_re.search(parmlist, offset)
            if next_match:
                val_len = next_match.start() - match.end()
            else:
                # At end of parmlist
                val_len = len(parmlist) - match.end()

        value = parmlist[match.end() : match.end() + val_len]
        results[name.lower()] = value

        match = name_re.search(parmlist, offset)
    return results
[ "def", "_parse_parmlist", "(", "self", ",", "parmlist", ")", ":", "parmlist", "=", "\"&\"", "+", "parmlist", "name_re", "=", "re", ".", "compile", "(", "r'\\&([A-Z0-9_]+)(\\[\\d+\\])?='", ")", "results", "=", "{", "}", "offset", "=", "0", "match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "while", "match", ":", "name", ",", "len_suffix", "=", "match", ".", "groups", "(", ")", "offset", "=", "match", ".", "end", "(", ")", "if", "len_suffix", ":", "val_len", "=", "int", "(", "len_suffix", "[", "1", ":", "-", "1", "]", ")", "else", ":", "next_match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "if", "next_match", ":", "val_len", "=", "next_match", ".", "start", "(", ")", "-", "match", ".", "end", "(", ")", "else", ":", "# At end of parmlist", "val_len", "=", "len", "(", "parmlist", ")", "-", "match", ".", "end", "(", ")", "value", "=", "parmlist", "[", "match", ".", "end", "(", ")", ":", "match", ".", "end", "(", ")", "+", "val_len", "]", "results", "[", "name", ".", "lower", "(", ")", "]", "=", "value", "match", "=", "name_re", ".", "search", "(", "parmlist", ",", "offset", ")", "return", "results" ]
Parses a PARMLIST string into a dictionary of name and value pairs.

The parsing is complicated by the following:
  - parameter keynames may or may not include a length specification
  - delimiter characters (=, &) may appear inside parameter values,
    provided the parameter has an explicit length.

For example, the following parmlist values are possible:
  A=B&C=D
  A[1]=B&C[1]=D
  A=B&C[1]=D
  A[3]=B&B&C[1]=D     (Here, the value of A is "B&B")
  A[1]=B&C[3]=D=7     (Here, the value of C is "D=7")
[ "Parses", "a", "PARMLIST", "string", "into", "a", "dictionary", "of", "name", "and", "value", "pairs", ".", "The", "parsing", "is", "complicated", "by", "the", "following", ":", "-", "parameter", "keynames", "may", "or", "may", "not", "include", "a", "length", "specification", "-", "delimiter", "characters", "(", "=", "&", ")", "may", "appear", "inside", "parameter", "values", "provided", "the", "parameter", "has", "an", "explicit", "length", ".", "For", "example", "the", "following", "parmlist", "values", "are", "possible", ":", "A", "=", "B&C", "=", "D", "A", "[", "1", "]", "=", "B&C", "[", "1", "]", "=", "D", "A", "=", "B&C", "[", "1", "]", "=", "D", "A", "[", "3", "]", "=", "B&B&C", "[", "1", "]", "=", "D", "(", "Here", "the", "value", "of", "A", "is", "B&B", ")", "A", "[", "1", "]", "=", "B&C", "[", "3", "]", "=", "D", "=", "7", "(", "Here", "the", "value", "of", "C", "is", "D", "=", "7", ")" ]
e74fc85135f171caa28277196fdcf7c7481ff298
https://github.com/bkeating/python-payflowpro/blob/e74fc85135f171caa28277196fdcf7c7481ff298/payflowpro/client.py#L133-L174
train
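Two illustrative round-trips through the parser above, again on an assumed client instance; the explicit length prefix is what lets a '&' survive inside a value:

client._parse_parmlist('A[3]=B&B&C[1]=D')
# -> {'a': 'B&B', 'c': 'D'}
client._parse_parmlist('RESULT=0&RESPMSG[8]=Approved')
# -> {'result': '0', 'respmsg': 'Approved'}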
damianbraun/nominatim
nominatim/nominatim.py
NominatimRequest.request
def request(self, url):
    """
    Send an HTTP request to the given *url*, try to decode the reply
    assuming it's JSON in UTF-8, and return the result

    :returns: Decoded result, or None in case of an error
    :rtype: mixed
    """
    self.logger.debug('url:\n' + url)
    try:
        response = urlopen(url)
        return json.loads(response.read().decode('utf-8'))
    except URLError:
        self.logger.info('Server connection problem')
    except Exception:
        self.logger.info('Server format problem')
python
def request(self, url):
    """
    Send an HTTP request to the given *url*, try to decode the reply
    assuming it's JSON in UTF-8, and return the result

    :returns: Decoded result, or None in case of an error
    :rtype: mixed
    """
    self.logger.debug('url:\n' + url)
    try:
        response = urlopen(url)
        return json.loads(response.read().decode('utf-8'))
    except URLError:
        self.logger.info('Server connection problem')
    except Exception:
        self.logger.info('Server format problem')
[ "def", "request", "(", "self", ",", "url", ")", ":", "self", ".", "logger", ".", "debug", "(", "'url:\\n'", "+", "url", ")", "try", ":", "response", "=", "urlopen", "(", "url", ")", "return", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")", "except", "URLError", ":", "self", ".", "logger", ".", "info", "(", "'Server connection problem'", ")", "except", "Exception", ":", "self", ".", "logger", ".", "info", "(", "'Server format problem'", ")" ]
Send an HTTP request to the given *url*, try to decode the reply
assuming it's JSON in UTF-8, and return the result

:returns: Decoded result, or None in case of an error
:rtype: mixed
[ "Send", "a", "http", "request", "to", "the", "given", "*", "url", "*", "try", "to", "decode", "the", "reply", "assuming", "it", "s", "JSON", "in", "UTF", "-", "8", "and", "return", "the", "result" ]
d3cc40e1d31755aabf7302a617ab6e982eeb9b4f
https://github.com/damianbraun/nominatim/blob/d3cc40e1d31755aabf7302a617ab6e982eeb9b4f/nominatim/nominatim.py#L58-L73
train
damianbraun/nominatim
nominatim/nominatim.py
Nominatim.query
def query(self, address, acceptlanguage=None, limit=20, countrycodes=None):
    """
    Issue a geocoding query for *address* to the Nominatim instance and
    return the decoded results

    :param address: a query string with an address or presumed
        parts of an address
    :type address: str or (if python2) unicode
    :param acceptlanguage: rfc2616 language code
    :type acceptlanguage: str or None
    :param limit: limit the number of results
    :type limit: int or None
    :param countrycodes: restrict the search to countries given by their
        ISO 3166-1alpha2 codes
        (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 )
    :type countrycodes: str iterable
    :returns: a list of search results (each a dict)
    :rtype: list or None
    """
    url = self.url + '&q=' + quote_plus(address)
    if acceptlanguage:
        url += '&accept-language=' + acceptlanguage
    if limit:
        url += '&limit=' + str(limit)
    if countrycodes:
        url += '&countrycodes=' + ','.join(countrycodes)
    return self.request(url)
python
def query(self, address, acceptlanguage=None, limit=20, countrycodes=None):
    """
    Issue a geocoding query for *address* to the Nominatim instance and
    return the decoded results

    :param address: a query string with an address or presumed
        parts of an address
    :type address: str or (if python2) unicode
    :param acceptlanguage: rfc2616 language code
    :type acceptlanguage: str or None
    :param limit: limit the number of results
    :type limit: int or None
    :param countrycodes: restrict the search to countries given by their
        ISO 3166-1alpha2 codes
        (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 )
    :type countrycodes: str iterable
    :returns: a list of search results (each a dict)
    :rtype: list or None
    """
    url = self.url + '&q=' + quote_plus(address)
    if acceptlanguage:
        url += '&accept-language=' + acceptlanguage
    if limit:
        url += '&limit=' + str(limit)
    if countrycodes:
        url += '&countrycodes=' + ','.join(countrycodes)
    return self.request(url)
[ "def", "query", "(", "self", ",", "address", ",", "acceptlanguage", "=", "None", ",", "limit", "=", "20", ",", "countrycodes", "=", "None", ")", ":", "url", "=", "self", ".", "url", "+", "'&q='", "+", "quote_plus", "(", "address", ")", "if", "acceptlanguage", ":", "url", "+=", "'&accept-language='", "+", "acceptlanguage", "if", "limit", ":", "url", "+=", "'&limit='", "+", "str", "(", "limit", ")", "if", "countrycodes", ":", "url", "+=", "'&countrycodes='", "+", "','", ".", "join", "(", "countrycodes", ")", "return", "self", ".", "request", "(", "url", ")" ]
Issue a geocoding query for *address* to the Nominatim instance and
return the decoded results

:param address: a query string with an address or presumed
    parts of an address
:type address: str or (if python2) unicode
:param acceptlanguage: rfc2616 language code
:type acceptlanguage: str or None
:param limit: limit the number of results
:type limit: int or None
:param countrycodes: restrict the search to countries given by their
    ISO 3166-1alpha2 codes
    (cf. https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 )
:type countrycodes: str iterable
:returns: a list of search results (each a dict)
:rtype: list or None
[ "Issue", "a", "geocoding", "query", "for", "*", "address", "*", "to", "the", "Nominatim", "instance", "and", "return", "the", "decoded", "results" ]
d3cc40e1d31755aabf7302a617ab6e982eeb9b4f
https://github.com/damianbraun/nominatim/blob/d3cc40e1d31755aabf7302a617ab6e982eeb9b4f/nominatim/nominatim.py#L92-L119
train
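A short usage sketch for the query method above (the import path and default-constructible `Nominatim` class are assumptions taken from this record, and a reachable endpoint is assumed):

    from nominatim import Nominatim  # import path assumed

    geocoder = Nominatim()
    # At most 5 hits, restricted to Germany; returns a list of dicts or None.
    results = geocoder.query('Brandenburger Tor, Berlin', limit=5, countrycodes=['de'])
    if results:
        print(results[0].get('display_name'))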
damianbraun/nominatim
nominatim/nominatim.py
NominatimReverse.query
def query(self, lat=None, lon=None, osm_id=None, osm_type=None, acceptlanguage='', zoom=18): """ Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type* to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: RFC 2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value """ url = self.url if osm_id is not None and osm_type not in ('N', 'W', 'R'): raise NominatimException('invalid osm_type') if osm_id is not None and osm_type is not None: url += '&osm_id=' + osm_id + '&osm_type=' + osm_type elif lat is not None and lon is not None: url += '&lat=' + str(lat) + '&lon=' + str(lon) else: return None if acceptlanguage: url += '&accept-language=' + acceptlanguage if zoom in zoom_aliases: zoom = zoom_aliases[zoom] if not isinstance(zoom, int) or zoom < 0 or zoom > 18: raise NominatimException('zoom must be between 0 and 18') url += '&zoom=' + str(zoom) return self.request(url)
python
def query(self, lat=None, lon=None, osm_id=None, osm_type=None, acceptlanguage='', zoom=18): """ Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type* to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: RFC 2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value """ url = self.url if osm_id is not None and osm_type not in ('N', 'W', 'R'): raise NominatimException('invalid osm_type') if osm_id is not None and osm_type is not None: url += '&osm_id=' + osm_id + '&osm_type=' + osm_type elif lat is not None and lon is not None: url += '&lat=' + str(lat) + '&lon=' + str(lon) else: return None if acceptlanguage: url += '&accept-language=' + acceptlanguage if zoom in zoom_aliases: zoom = zoom_aliases[zoom] if not isinstance(zoom, int) or zoom < 0 or zoom > 18: raise NominatimException('zoom must be between 0 and 18') url += '&zoom=' + str(zoom) return self.request(url)
[ "def", "query", "(", "self", ",", "lat", "=", "None", ",", "lon", "=", "None", ",", "osm_id", "=", "None", ",", "osm_type", "=", "None", ",", "acceptlanguage", "=", "''", ",", "zoom", "=", "18", ")", ":", "url", "=", "self", ".", "url", "if", "osm_id", "is", "not", "None", "and", "osm_type", "not", "in", "(", "'N'", ",", "'W'", ",", "'R'", ")", ":", "raise", "NominatimException", "(", "'invalid osm_type'", ")", "if", "osm_id", "is", "not", "None", "and", "osm_type", "is", "not", "None", ":", "url", "+=", "'&osm_id='", "+", "osm_id", "+", "'&osm_type='", "+", "osm_type", "elif", "lat", "is", "not", "None", "and", "lon", "is", "not", "None", ":", "url", "+=", "'&lat='", "+", "str", "(", "lat", ")", "+", "'&lon='", "+", "str", "(", "lon", ")", "else", ":", "return", "None", "if", "acceptlanguage", ":", "url", "+=", "'&accept-language='", "+", "acceptlanguage", "if", "zoom", "in", "zoom_aliases", ":", "zoom", "=", "zoom_aliases", "[", "zoom", "]", "if", "not", "isinstance", "(", "zoom", ",", "int", ")", "or", "zoom", "<", "0", "or", "zoom", ">", "18", ":", "raise", "NominatimException", "(", "'zoom must effectively be betwen 0 and 18'", ")", "url", "+=", "'&zoom='", "+", "str", "(", "zoom", ")", "return", "self", ".", "request", "(", "url", ")" ]
Issue a reverse geocoding query for a place given by *lat* and *lon*, or by *osm_id* and *osm_type* to the Nominatim instance and return the decoded results :param lat: the geographical latitude of the place :param lon: the geographical longitude of the place :param osm_id: openstreetmap identifier osm_id :type osm_id: str :param osm_type: openstreetmap type osm_type :type osm_type: str :param acceptlanguage: RFC 2616 language code :type acceptlanguage: str or None :param zoom: zoom factor from 0 to 18 :type zoom: int or None or a key in :data:`zoom_aliases` :returns: a list of search results (each a dict) :rtype: list or None :raise: NominatimException if invalid zoom value
[ "Issue", "a", "reverse", "geocoding", "query", "for", "a", "place", "given", "by", "*", "lat", "*", "and", "*", "lon", "*", "or", "by", "*", "osm_id", "*", "and", "*", "osm_type", "*", "to", "the", "Nominatim", "instance", "and", "return", "the", "decoded", "results" ]
d3cc40e1d31755aabf7302a617ab6e982eeb9b4f
https://github.com/damianbraun/nominatim/blob/d3cc40e1d31755aabf7302a617ab6e982eeb9b4f/nominatim/nominatim.py#L139-L180
train
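A matching sketch for the reverse query (same import and endpoint assumptions as above; either lat/lon or osm_id/osm_type must be supplied, and an invalid zoom raises NominatimException):

    from nominatim import NominatimReverse  # import path assumed

    rev = NominatimReverse()
    place = rev.query(lat=52.5163, lon=13.3777, zoom=10)  # city-level zoom
    print(place)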
darothen/xbpch
xbpch/grid.py
CTMGrid.from_model
def from_model(cls, model_name, **kwargs): """ Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : string Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.25) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model). """ settings = _get_model_info(model_name) model = settings.pop('model_name') for k, v in list(kwargs.items()): if k in ('resolution', 'Psurf'): settings[k] = v return cls(model, **settings)
python
def from_model(cls, model_name, **kwargs): """ Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : string Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.25) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model). """ settings = _get_model_info(model_name) model = settings.pop('model_name') for k, v in list(kwargs.items()): if k in ('resolution', 'Psurf'): settings[k] = v return cls(model, **settings)
[ "def", "from_model", "(", "cls", ",", "model_name", ",", "*", "*", "kwargs", ")", ":", "settings", "=", "_get_model_info", "(", "model_name", ")", "model", "=", "settings", ".", "pop", "(", "'model_name'", ")", "for", "k", ",", "v", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "if", "k", "in", "(", "'resolution'", ",", "'Psurf'", ")", ":", "settings", "[", "k", "]", "=", "v", "return", "cls", "(", "model", ",", "*", "*", "settings", ")" ]
Define a grid using the specifications of a given model. Parameters ---------- model_name : string Name of the model (see :func:`get_supported_models` for available model names). Supports multiple formats (e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5'). **kwargs : string Parameters that override the model or default grid settings (See Other Parameters below). Returns ------- A :class:`CTMGrid` object. Other Parameters ---------------- resolution : (float, float) Horizontal grid resolution (lon, lat) or (DI, DJ) [degrees] Psurf : float Average surface pressure [hPa] (default: 1013.25) Notes ----- Regridded vertical models may have several valid names (e.g., 'GEOS5_47L' and 'GEOS5_REDUCED' refer to the same model).
[ "Define", "a", "grid", "using", "the", "specifications", "of", "a", "given", "model", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/grid.py#L143-L180
train
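A usage sketch, assuming xbpch is installed and 'GEOS5' is among the supported model names:

    from xbpch.grid import CTMGrid

    # Hyphen/underscore spellings are accepted per the docstring above.
    grid = CTMGrid.from_model('GEOS-5', resolution=(5., 4.))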
darothen/xbpch
xbpch/grid.py
CTMGrid.copy_from_model
def copy_from_model(cls, model_name, reference, **kwargs): """ Set-up a user-defined grid using specifications of a reference grid model. Parameters ---------- model_name : string name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object. """ if isinstance(reference, cls): settings = reference.__dict__.copy() settings.pop('model') else: settings = _get_model_info(reference) settings.pop('model_name') settings.update(kwargs) settings['reference'] = reference return cls(model_name, **settings)
python
def copy_from_model(cls, model_name, reference, **kwargs): """ Set-up a user-defined grid using specifications of a reference grid model. Parameters ---------- model_name : string name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object. """ if isinstance(reference, cls): settings = reference.__dict__.copy() settings.pop('model') else: settings = _get_model_info(reference) settings.pop('model_name') settings.update(kwargs) settings['reference'] = reference return cls(model_name, **settings)
[ "def", "copy_from_model", "(", "cls", ",", "model_name", ",", "reference", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "reference", ",", "cls", ")", ":", "settings", "=", "reference", ".", "__dict__", ".", "copy", "(", ")", "settings", ".", "pop", "(", "'model'", ")", "else", ":", "settings", "=", "_get_model_info", "(", "reference", ")", "settings", ".", "pop", "(", "'model_name'", ")", "settings", ".", "update", "(", "kwargs", ")", "settings", "[", "'reference'", "]", "=", "reference", "return", "cls", "(", "model_name", ",", "*", "*", "settings", ")" ]
Set-up a user-defined grid using specifications of a reference grid model. Parameters ---------- model_name : string name of the user-defined grid model. reference : string or :class:`CTMGrid` instance Name of the reference model (see :func:`get_supported_models`), or a :class:`CTMGrid` object from which grid set-up is copied. **kwargs Any set-up parameter which will override the settings of the reference model (see :class:`CTMGrid` parameters). Returns ------- A :class:`CTMGrid` object.
[ "Set", "-", "up", "a", "user", "-", "defined", "grid", "using", "specifications", "of", "a", "reference", "grid", "model", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/grid.py#L183-L214
train
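A sketch of deriving a user-named grid from a reference model while overriding one setting (the name 'my_geos5' is illustrative):

    from xbpch.grid import CTMGrid

    custom = CTMGrid.copy_from_model('my_geos5', 'GEOS5', Psurf=1000.)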
darothen/xbpch
xbpch/grid.py
CTMGrid.get_layers
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs): """ Compute scalars or coordinates associated to the vertical layers. Parameters ---------- grid_spec : CTMGrid object CTMGrid containing the information necessary to re-construct grid levels for a given model coordinate system. Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`Bp(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equals :math:`P_{top}` and :attr:`esig` attribute for pure sigma grids. :math:`Cp(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by; .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details. """ Psurf = np.asarray(Psurf) output_ndims = Psurf.ndim + 1 if output_ndims > 3: raise ValueError("`Psurf` argument must be a float or an array" " with <= 2 dimensions (or None)") # Compute all variables: takes not much memory, fast # and better for code reading SIGe = None SIGc = None ETAe = None ETAc = None if self.hybrid: try: Ap = broadcast_1d_array(self.Ap, output_ndims) Bp = broadcast_1d_array(self.Bp, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (Ap, Bp)") Cp = 0. else: try: Bp = SIGe = broadcast_1d_array(self.esig, output_ndims) SIGc = broadcast_1d_array(self.csig, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (esig, csig)") Ap = Cp = Ptop Pe = Ap + Bp * (Psurf - Cp) Pc = 0.5 * (Pe[0:-1] + Pe[1:]) if self.hybrid: ETAe = (Pe - Ptop)/(Psurf - Ptop) ETAc = (Pc - Ptop)/(Psurf - Ptop) else: SIGe = SIGe * np.ones_like(Psurf) SIGc = SIGc * np.ones_like(Psurf) Ze = prof_altitude(Pe, **kwargs) Zc = prof_altitude(Pc, **kwargs) all_vars = {'eta_edges': ETAe, 'eta_centers': ETAc, 'sigma_edges': SIGe, 'sigma_centers': SIGc, 'pressure_edges': Pe, 'pressure_centers': Pc, 'altitude_edges': Ze, 'altitude_centers': Zc} return all_vars
python
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs): """ Compute scalars or coordinates associated to the vertical layers. Parameters ---------- grid_spec : CTMGrid object CTMGrid containing the information necessary to re-construct grid levels for a given model coordinate system. Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`Bp(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equals :math:`P_{top}` and :attr:`esig` attribute for pure sigma grids. :math:`Cp(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by; .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details. """ Psurf = np.asarray(Psurf) output_ndims = Psurf.ndim + 1 if output_ndims > 3: raise ValueError("`Psurf` argument must be a float or an array" " with <= 2 dimensions (or None)") # Compute all variables: takes not much memory, fast # and better for code reading SIGe = None SIGc = None ETAe = None ETAc = None if self.hybrid: try: Ap = broadcast_1d_array(self.Ap, output_ndims) Bp = broadcast_1d_array(self.Bp, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (Ap, Bp)") Cp = 0. else: try: Bp = SIGe = broadcast_1d_array(self.esig, output_ndims) SIGc = broadcast_1d_array(self.csig, output_ndims) except KeyError: raise ValueError("Impossible to compute vertical levels," " data is missing (esig, csig)") Ap = Cp = Ptop Pe = Ap + Bp * (Psurf - Cp) Pc = 0.5 * (Pe[0:-1] + Pe[1:]) if self.hybrid: ETAe = (Pe - Ptop)/(Psurf - Ptop) ETAc = (Pc - Ptop)/(Psurf - Ptop) else: SIGe = SIGe * np.ones_like(Psurf) SIGc = SIGc * np.ones_like(Psurf) Ze = prof_altitude(Pe, **kwargs) Zc = prof_altitude(Pc, **kwargs) all_vars = {'eta_edges': ETAe, 'eta_centers': ETAc, 'sigma_edges': SIGe, 'sigma_centers': SIGc, 'pressure_edges': Pe, 'pressure_centers': Pc, 'altitude_edges': Ze, 'altitude_centers': Zc} return all_vars
[ "def", "get_layers", "(", "self", ",", "Psurf", "=", "1013.25", ",", "Ptop", "=", "0.01", ",", "*", "*", "kwargs", ")", ":", "Psurf", "=", "np", ".", "asarray", "(", "Psurf", ")", "output_ndims", "=", "Psurf", ".", "ndim", "+", "1", "if", "output_ndims", ">", "3", ":", "raise", "ValueError", "(", "\"`Psurf` argument must be a float or an array\"", "\" with <= 2 dimensions (or None)\"", ")", "# Compute all variables: takes not much memory, fast", "# and better for code reading", "SIGe", "=", "None", "SIGc", "=", "None", "ETAe", "=", "None", "ETAc", "=", "None", "if", "self", ".", "hybrid", ":", "try", ":", "Ap", "=", "broadcast_1d_array", "(", "self", ".", "Ap", ",", "output_ndims", ")", "Bp", "=", "broadcast_1d_array", "(", "self", ".", "Bp", ",", "output_ndims", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Impossible to compute vertical levels,\"", "\" data is missing (Ap, Bp)\"", ")", "Cp", "=", "0.", "else", ":", "try", ":", "Bp", "=", "SIGe", "=", "broadcast_1d_array", "(", "self", ".", "esig", ",", "output_ndims", ")", "SIGc", "=", "broadcast_1d_array", "(", "self", ".", "csig", ",", "output_ndims", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Impossible to compute vertical levels,\"", "\" data is missing (esig, csig)\"", ")", "Ap", "=", "Cp", "=", "Ptop", "Pe", "=", "Ap", "+", "Bp", "*", "(", "Psurf", "-", "Cp", ")", "Pc", "=", "0.5", "*", "(", "Pe", "[", "0", ":", "-", "1", "]", "+", "Pe", "[", "1", ":", "]", ")", "if", "self", ".", "hybrid", ":", "ETAe", "=", "(", "Pe", "-", "Ptop", ")", "/", "(", "Psurf", "-", "Ptop", ")", "ETAc", "=", "(", "Pc", "-", "Ptop", ")", "/", "(", "Psurf", "-", "Ptop", ")", "else", ":", "SIGe", "=", "SIGe", "*", "np", ".", "ones_like", "(", "Psurf", ")", "SIGc", "=", "SIGc", "*", "np", ".", "ones_like", "(", "Psurf", ")", "Ze", "=", "prof_altitude", "(", "Pe", ",", "*", "*", "kwargs", ")", "Zc", "=", "prof_altitude", "(", "Pc", ",", "*", "*", "kwargs", ")", "all_vars", "=", "{", "'eta_edges'", ":", "ETAe", ",", "'eta_centers'", ":", "ETAc", ",", "'sigma_edges'", ":", "SIGe", ",", "'sigma_centers'", ":", "SIGc", ",", "'pressure_edges'", ":", "Pe", ",", "'pressure_centers'", ":", "Pc", ",", "'altitude_edges'", ":", "Ze", ",", "'altitude_centers'", ":", "Zc", "}", "return", "all_vars" ]
Compute scalars or coordinates associated to the vertical layers. Parameters ---------- grid_spec : CTMGrid object CTMGrid containing the information necessary to re-construct grid levels for a given model coordinate system. Returns ------- dictionary of vertical grid components, including eta (unitless), sigma (unitless), pressure (hPa), and altitude (km) on both layer centers and edges, ordered from bottom-to-top. Notes ----- For pure sigma grids, sigma coordinates are given by the esig (edges) and csig (centers). For both pure sigma and hybrid grids, pressures at layers edges L are calculated as follows: .. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p) where :math:`P_{surf}`, :math:`P_{top}` Air pressures at the surface and the top of the modeled atmosphere (:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid` instance). :math:`A_p(L)`, :math:`Bp(L)` Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid grids, or respectively equals :math:`P_{top}` and :attr:`esig` attribute for pure sigma grids. :math:`Cp(L)` equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid grids. Pressures at grid centers are averages of pressures at grid edges: .. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2 For hybrid grids, ETA coordinates of grid edges and grid centers are given by; .. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top}) .. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top}) Altitude values are fit using a 5th-degree polynomial; see `gridspec.prof_altitude` for more details.
[ "Compute", "scalars", "or", "coordinates", "associated", "to", "the", "vertical", "layers", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/grid.py#L217-L324
train
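A sketch of pulling the vertical coordinate bundle from such a grid; the dict keys are the ones assembled at the end of the method:

    from xbpch.grid import CTMGrid

    grid = CTMGrid.from_model('GEOS5')
    layers = grid.get_layers(Psurf=1013.25)
    # Edges carry one more entry than centers along the vertical axis.
    print(layers['pressure_edges'].shape, layers['pressure_centers'].shape)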
darothen/xbpch
xbpch/grid.py
CTMGrid.get_lonlat
def get_lonlat(self): """ Calculate longitude-latitude grid for a specified resolution and configuration / ordering. Parameters ---------- rlon, rlat : float Resolution (in degrees) of longitude and latitude grids. halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid should be centered at 180 degrees. """ rlon, rlat = self.resolution # Compute number of grid cells in each direction Nlon = int(360. / rlon) Nlat = int(180. / rlat) + self.halfpolar # Compute grid cell edges elon = np.arange(Nlon + 1) * rlon - np.array(180.) elon -= rlon / 2. * self.center180 elat = np.arange(Nlat + 1) * rlat - np.array(90.) elat -= rlat / 2. * self.halfpolar elat[0] = -90. elat[-1] = 90. # Compute grid cell centers clon = (elon - (rlon / 2.))[1:] clat = np.arange(Nlat) * rlat - np.array(90.) # Fix grid boundaries if halfpolar if self.halfpolar: clat[0] = (elat[0] + elat[1]) / 2. clat[-1] = -clat[0] else: clat += (elat[1] - elat[0]) / 2. return { "lon_centers": clon, "lat_centers": clat, "lon_edges": elon, "lat_edges": elat }
python
def get_lonlat(self): """ Calculate longitude-latitude grid for a specified resolution and configuration / ordering. Parameters ---------- rlon, rlat : float Resolution (in degrees) of longitude and latitude grids. halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid should be centered at 180 degrees. """ rlon, rlat = self.resolution # Compute number of grid cells in each direction Nlon = int(360. / rlon) Nlat = int(180. / rlat) + self.halfpolar # Compute grid cell edges elon = np.arange(Nlon + 1) * rlon - np.array(180.) elon -= rlon / 2. * self.center180 elat = np.arange(Nlat + 1) * rlat - np.array(90.) elat -= rlat / 2. * self.halfpolar elat[0] = -90. elat[-1] = 90. # Compute grid cell centers clon = (elon - (rlon / 2.))[1:] clat = np.arange(Nlat) * rlat - np.array(90.) # Fix grid boundaries if halfpolar if self.halfpolar: clat[0] = (elat[0] + elat[1]) / 2. clat[-1] = -clat[0] else: clat += (elat[1] - elat[0]) / 2. return { "lon_centers": clon, "lat_centers": clat, "lon_edges": elon, "lat_edges": elat }
[ "def", "get_lonlat", "(", "self", ")", ":", "rlon", ",", "rlat", "=", "self", ".", "resolution", "# Compute number of grid cells in each direction", "Nlon", "=", "int", "(", "360.", "/", "rlon", ")", "Nlat", "=", "int", "(", "180.", "/", "rlat", ")", "+", "self", ".", "halfpolar", "# Compute grid cell edges", "elon", "=", "np", ".", "arange", "(", "Nlon", "+", "1", ")", "*", "rlon", "-", "np", ".", "array", "(", "180.", ")", "elon", "-=", "rlon", "/", "2.", "*", "self", ".", "center180", "elat", "=", "np", ".", "arange", "(", "Nlat", "+", "1", ")", "*", "rlat", "-", "np", ".", "array", "(", "90.", ")", "elat", "-=", "rlat", "/", "2.", "*", "self", ".", "halfpolar", "elat", "[", "0", "]", "=", "-", "90.", "elat", "[", "-", "1", "]", "=", "90.", "# Compute grid cell centers", "clon", "=", "(", "elon", "-", "(", "rlon", "/", "2.", ")", ")", "[", "1", ":", "]", "clat", "=", "np", ".", "arange", "(", "Nlat", ")", "*", "rlat", "-", "np", ".", "array", "(", "90.", ")", "# Fix grid boundaries if halfpolar", "if", "self", ".", "halfpolar", ":", "clat", "[", "0", "]", "=", "(", "elat", "[", "0", "]", "+", "elat", "[", "1", "]", ")", "/", "2.", "clat", "[", "-", "1", "]", "=", "-", "clat", "[", "0", "]", "else", ":", "clat", "+=", "(", "elat", "[", "1", "]", "-", "elat", "[", "0", "]", ")", "/", "2.", "return", "{", "\"lon_centers\"", ":", "clon", ",", "\"lat_centers\"", ":", "clat", ",", "\"lon_edges\"", ":", "elon", ",", "\"lat_edges\"", ":", "elat", "}" ]
Calculate longitude-latitude grid for a specified resolution and configuration / ordering. Parameters ---------- rlon, rlat : float Resolution (in degrees) of longitude and latitude grids. halfpolar : bool (default=True) Polar grid boxes span half of rlat relative to the other grid cells. center180 : bool (default=True) Longitude grid should be centered at 180 degrees.
[ "Calculate", "longitude", "-", "latitude", "grid", "for", "a", "specified", "resolution", "and", "configuration", "/", "ordering", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/grid.py#L327-L371
train
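A sketch of the horizontal companion call on the same kind of grid object:

    from xbpch.grid import CTMGrid

    grid = CTMGrid.from_model('GEOS5', resolution=(5., 4.))
    ll = grid.get_lonlat()
    # Halfpolar grids pin the outermost latitude edges at -90/+90.
    print(ll['lon_centers'][:3], ll['lat_edges'][:3])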
openSUSE/py2pack
py2pack/__init__.py
_get_template_dirs
def _get_template_dirs(): """Existing directories to search for jinja2 templates. The order is important. The first template found in the first existing dir wins!""" return filter(lambda x: os.path.exists(x), [ # user dir os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'), # system wide dir os.path.join('/', 'usr', 'share', 'py2pack', 'templates'), # usually inside the site-packages dir os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), ])
python
def _get_template_dirs(): """Existing directories to search for jinja2 templates. The order is important. The first template found in the first existing dir wins!""" return filter(lambda x: os.path.exists(x), [ # user dir os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'), # system wide dir os.path.join('/', 'usr', 'share', 'py2pack', 'templates'), # usually inside the site-packages dir os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), ])
[ "def", "_get_template_dirs", "(", ")", ":", "return", "filter", "(", "lambda", "x", ":", "os", ".", "path", ".", "exists", "(", "x", ")", ",", "[", "# user dir", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.py2pack'", ",", "'templates'", ")", ",", "# system wide dir", "os", ".", "path", ".", "join", "(", "'/'", ",", "'usr'", ",", "'share'", ",", "'py2pack'", ",", "'templates'", ")", ",", "# usually inside the site-packages dir", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "'templates'", ")", ",", "]", ")" ]
Existing directories to search for jinja2 templates. The order is important. The first template found in the first existing dir wins!
[ "existing", "directories", "where", "to", "search", "for", "jinja2", "templates", ".", "The", "order", "is", "important", ".", "The", "first", "found", "template", "from", "the", "first", "found", "dir", "wins!" ]
4ff689900dd2c1a390b5359ce1037c188b75fc65
https://github.com/openSUSE/py2pack/blob/4ff689900dd2c1a390b5359ce1037c188b75fc65/py2pack/__init__.py#L53-L63
train
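The existing-directory filter reproduced inline, so it can be run standalone (the candidate paths are the ones hard-coded above):

    import os

    candidates = [
        os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'),
        os.path.join('/', 'usr', 'share', 'py2pack', 'templates'),
    ]
    # Downstream, the first template found in the first existing dir wins.
    existing = [d for d in candidates if os.path.exists(d)]
    print(existing)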
openSUSE/py2pack
py2pack/__init__.py
_license_from_classifiers
def _license_from_classifiers(data): """try to get a license from the classifiers""" classifiers = data.get('classifiers', []) found_license = None for c in classifiers: if c.startswith("License :: OSI Approved :: "): found_license = c.replace("License :: OSI Approved :: ", "") return found_license
python
def _license_from_classifiers(data): """try to get a license from the classifiers""" classifiers = data.get('classifiers', []) found_license = None for c in classifiers: if c.startswith("License :: OSI Approved :: "): found_license = c.replace("License :: OSI Approved :: ", "") return found_license
[ "def", "_license_from_classifiers", "(", "data", ")", ":", "classifiers", "=", "data", ".", "get", "(", "'classifiers'", ",", "[", "]", ")", "found_license", "=", "None", "for", "c", "in", "classifiers", ":", "if", "c", ".", "startswith", "(", "\"License :: OSI Approved :: \"", ")", ":", "found_license", "=", "c", ".", "replace", "(", "\"License :: OSI Approved :: \"", ",", "\"\"", ")", "return", "found_license" ]
try to get a license from the classifiers
[ "try", "to", "get", "a", "license", "from", "the", "classifiers" ]
4ff689900dd2c1a390b5359ce1037c188b75fc65
https://github.com/openSUSE/py2pack/blob/4ff689900dd2c1a390b5359ce1037c188b75fc65/py2pack/__init__.py#L177-L184
train
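The classifier-stripping logic reproduced inline for a quick standalone check:

    prefix = 'License :: OSI Approved :: '
    classifiers = ['Programming Language :: Python', prefix + 'MIT License']
    found = None
    for c in classifiers:
        if c.startswith(prefix):
            found = c[len(prefix):]
    print(found)  # -> 'MIT License'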
openSUSE/py2pack
py2pack/__init__.py
_normalize_license
def _normalize_license(data): """try to get SPDX license""" license = data.get('license', None) if not license: # try to get license from classifiers license = _license_from_classifiers(data) if license: if license in SDPX_LICENSES.keys(): data['license'] = SDPX_LICENSES[license] else: data['license'] = "%s (FIXME:No SPDX)" % (license) else: data['license'] = ""
python
def _normalize_license(data): """try to get SPDX license""" license = data.get('license', None) if not license: # try to get license from classifiers license = _license_from_classifiers(data) if license: if license in SDPX_LICENSES.keys(): data['license'] = SDPX_LICENSES[license] else: data['license'] = "%s (FIXME:No SPDX)" % (license) else: data['license'] = ""
[ "def", "_normalize_license", "(", "data", ")", ":", "license", "=", "data", ".", "get", "(", "'license'", ",", "None", ")", "if", "not", "license", ":", "# try to get license from classifiers", "license", "=", "_license_from_classifiers", "(", "data", ")", "if", "license", ":", "if", "license", "in", "SDPX_LICENSES", ".", "keys", "(", ")", ":", "data", "[", "'license'", "]", "=", "SDPX_LICENSES", "[", "license", "]", "else", ":", "data", "[", "'license'", "]", "=", "\"%s (FIXME:No SPDX)\"", "%", "(", "license", ")", "else", ":", "data", "[", "'license'", "]", "=", "\"\"" ]
try to get SPDX license
[ "try", "to", "get", "SDPX", "license" ]
4ff689900dd2c1a390b5359ce1037c188b75fc65
https://github.com/openSUSE/py2pack/blob/4ff689900dd2c1a390b5359ce1037c188b75fc65/py2pack/__init__.py#L187-L199
train
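A usage sketch; note that the helper mutates the dict in place rather than returning it (reaching the private function as below is an assumption based on the module path above):

    import py2pack  # assumption: _normalize_license lives at py2pack/__init__.py module level

    data = {'name': 'example', 'license': 'Some Custom License'}
    py2pack._normalize_license(data)
    # Licenses missing from the SPDX map come back flagged for manual fixing:
    print(data['license'])  # e.g. 'Some Custom License (FIXME:No SPDX)'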
asmeurer/iterm2-tools
iterm2_tools/ipython.py
wrap_prompts_class
def wrap_prompts_class(Klass): """ Wrap an IPython Prompts class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. """ try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
python
def wrap_prompts_class(Klass): """ Wrap an IPython Prompts class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations. """ try: from prompt_toolkit.token import ZeroWidthEscape except ImportError: return Klass class ITerm2IPythonPrompt(Klass): def in_prompt_tokens(self, cli=None): return [ (ZeroWidthEscape, last_status(self.shell)+BEFORE_PROMPT), ]+\ super(ITerm2IPythonPrompt, self).in_prompt_tokens(cli)+\ [(ZeroWidthEscape, AFTER_PROMPT)] return ITerm2IPythonPrompt
[ "def", "wrap_prompts_class", "(", "Klass", ")", ":", "try", ":", "from", "prompt_toolkit", ".", "token", "import", "ZeroWidthEscape", "except", "ImportError", ":", "return", "Klass", "class", "ITerm2IPythonPrompt", "(", "Klass", ")", ":", "def", "in_prompt_tokens", "(", "self", ",", "cli", "=", "None", ")", ":", "return", "[", "(", "ZeroWidthEscape", ",", "last_status", "(", "self", ".", "shell", ")", "+", "BEFORE_PROMPT", ")", ",", "]", "+", "super", "(", "ITerm2IPythonPrompt", ",", "self", ")", ".", "in_prompt_tokens", "(", "cli", ")", "+", "[", "(", "ZeroWidthEscape", ",", "AFTER_PROMPT", ")", "]", "return", "ITerm2IPythonPrompt" ]
Wrap an IPython Prompts class This is needed in order for Prompt to inject the correct escape sequences at the right positions for shell integrations.
[ "Wrap", "an", "IPython", "s", "Prompt", "class" ]
97b1b593bb02884521c2c05ed414f178de0b934e
https://github.com/asmeurer/iterm2-tools/blob/97b1b593bb02884521c2c05ed414f178de0b934e/iterm2_tools/ipython.py#L113-L137
train
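A sketch of applying the wrapper in an IPython startup file; `Prompts` is IPython's stock prompt class, and the wrapper silently returns the class unchanged when prompt_toolkit's ZeroWidthEscape is unavailable:

    from IPython.terminal.prompts import Prompts
    from iterm2_tools.ipython import wrap_prompts_class  # import path taken from the record above

    ITerm2Prompts = wrap_prompts_class(Prompts)
    # Inside IPython:  get_ipython().prompts = ITerm2Prompts(get_ipython())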
blixt/py-starbound
starbound/btreedb5.py
BTreeDB5.get_all_keys
def get_all_keys(self, start=None): """ A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree. """ s = self.stream if not start: start = HEADER_SIZE + self.block_size * self.root_block s.seek(start) block_type = s.read(2) if block_type == LEAF: reader = LeafReader(self) num_keys = struct.unpack('>i', reader.read(4))[0] for _ in range(num_keys): cur_key = reader.read(self.key_size) # We do a tell/seek here so that the user can read from # the file while this loop is still being run cur_pos = s.tell() yield cur_key s.seek(cur_pos) length = sbon.read_varint(reader) reader.seek(length, 1) elif block_type == INDEX: (_, num_keys, first_child) = struct.unpack('>Bii', s.read(9)) children = [first_child] for _ in range(num_keys): # Skip the key field. _ = s.read(self.key_size) # Read pointer to the child block. next_child = struct.unpack('>i', s.read(4))[0] children.append(next_child) for child_loc in children: for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc): yield key elif block_type == FREE: pass else: raise Exception('Unhandled block type: {}'.format(block_type))
python
def get_all_keys(self, start=None): """ A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree. """ s = self.stream if not start: start = HEADER_SIZE + self.block_size * self.root_block s.seek(start) block_type = s.read(2) if block_type == LEAF: reader = LeafReader(self) num_keys = struct.unpack('>i', reader.read(4))[0] for _ in range(num_keys): cur_key = reader.read(self.key_size) # We do a tell/seek here so that the user can read from # the file while this loop is still being run cur_pos = s.tell() yield cur_key s.seek(cur_pos) length = sbon.read_varint(reader) reader.seek(length, 1) elif block_type == INDEX: (_, num_keys, first_child) = struct.unpack('>Bii', s.read(9)) children = [first_child] for _ in range(num_keys): # Skip the key field. _ = s.read(self.key_size) # Read pointer to the child block. next_child = struct.unpack('>i', s.read(4))[0] children.append(next_child) for child_loc in children: for key in self.get_all_keys(HEADER_SIZE + self.block_size * child_loc): yield key elif block_type == FREE: pass else: raise Exception('Unhandled block type: {}'.format(block_type))
[ "def", "get_all_keys", "(", "self", ",", "start", "=", "None", ")", ":", "s", "=", "self", ".", "stream", "if", "not", "start", ":", "start", "=", "HEADER_SIZE", "+", "self", ".", "block_size", "*", "self", ".", "root_block", "s", ".", "seek", "(", "start", ")", "block_type", "=", "s", ".", "read", "(", "2", ")", "if", "block_type", "==", "LEAF", ":", "reader", "=", "LeafReader", "(", "self", ")", "num_keys", "=", "struct", ".", "unpack", "(", "'>i'", ",", "reader", ".", "read", "(", "4", ")", ")", "[", "0", "]", "for", "_", "in", "range", "(", "num_keys", ")", ":", "cur_key", "=", "reader", ".", "read", "(", "self", ".", "key_size", ")", "# We to a tell/seek here so that the user can read from", "# the file while this loop is still being run", "cur_pos", "=", "s", ".", "tell", "(", ")", "yield", "cur_key", "s", ".", "seek", "(", "cur_pos", ")", "length", "=", "sbon", ".", "read_varint", "(", "reader", ")", "reader", ".", "seek", "(", "length", ",", "1", ")", "elif", "block_type", "==", "INDEX", ":", "(", "_", ",", "num_keys", ",", "first_child", ")", "=", "struct", ".", "unpack", "(", "'>Bii'", ",", "s", ".", "read", "(", "9", ")", ")", "children", "=", "[", "first_child", "]", "for", "_", "in", "range", "(", "num_keys", ")", ":", "# Skip the key field.", "_", "=", "s", ".", "read", "(", "self", ".", "key_size", ")", "# Read pointer to the child block.", "next_child", "=", "struct", ".", "unpack", "(", "'>i'", ",", "s", ".", "read", "(", "4", ")", ")", "[", "0", "]", "children", ".", "append", "(", "next_child", ")", "for", "child_loc", "in", "children", ":", "for", "key", "in", "self", ".", "get_all_keys", "(", "HEADER_SIZE", "+", "self", ".", "block_size", "*", "child_loc", ")", ":", "yield", "key", "elif", "block_type", "==", "FREE", ":", "pass", "else", ":", "raise", "Exception", "(", "'Unhandled block type: {}'", ".", "format", "(", "block_type", ")", ")" ]
A generator which yields all valid keys starting at the given `start` offset. If `start` is `None`, we will start from the root of the tree.
[ "A", "generator", "which", "yields", "a", "list", "of", "all", "valid", "keys", "starting", "at", "the", "given", "start", "offset", ".", "If", "start", "is", "None", "we", "will", "start", "from", "the", "root", "of", "the", "tree", "." ]
68a2f6bfef73d8803191f937c69005a64eeae017
https://github.com/blixt/py-starbound/blob/68a2f6bfef73d8803191f937c69005a64eeae017/starbound/btreedb5.py#L69-L107
train
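A hedged traversal sketch; the constructor and any header-parsing step are assumptions, so consult the class before relying on them:

    from starbound.btreedb5 import BTreeDB5  # import path taken from the record above

    with open('universe.chunks', 'rb') as f:     # filename is illustrative
        db = BTreeDB5(f)                         # assumption: stream-taking constructor
        for key in db.get_all_keys():            # yields raw key bytes
            print(key.hex())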
darothen/xbpch
xbpch/uff.py
_replace_star
def _replace_star(fmt, size): """ Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi' """ n_stars = fmt.count('*') if n_stars > 1: raise ValueError("More than one `*` in format (%s)." % fmt) if n_stars: i = fmt.find('*') s = struct.calcsize(fmt.replace(fmt[i:i + 2], '')) n = old_div((size - s), struct.calcsize(fmt[i + 1])) fmt = fmt.replace('*', str(n)) return fmt
python
def _replace_star(fmt, size): """ Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi' """ n_stars = fmt.count('*') if n_stars > 1: raise ValueError("More than one `*` in format (%s)." % fmt) if n_stars: i = fmt.find('*') s = struct.calcsize(fmt.replace(fmt[i:i + 2], '')) n = old_div((size - s), struct.calcsize(fmt[i + 1])) fmt = fmt.replace('*', str(n)) return fmt
[ "def", "_replace_star", "(", "fmt", ",", "size", ")", ":", "n_stars", "=", "fmt", ".", "count", "(", "'*'", ")", "if", "n_stars", ">", "1", ":", "raise", "ValueError", "(", "\"More than one `*` in format (%s).\"", "%", "fmt", ")", "if", "n_stars", ":", "i", "=", "fmt", ".", "find", "(", "'*'", ")", "s", "=", "struct", ".", "calcsize", "(", "fmt", ".", "replace", "(", "fmt", "[", "i", ":", "i", "+", "2", "]", ",", "''", ")", ")", "n", "=", "old_div", "(", "(", "size", "-", "s", ")", ",", "struct", ".", "calcsize", "(", "fmt", "[", "i", "+", "1", "]", ")", ")", "fmt", "=", "fmt", ".", "replace", "(", "'*'", ",", "str", "(", "n", ")", ")", "return", "fmt" ]
Replace the `*` placeholder in a format string (fmt), so that struct.calcsize(fmt) is equal to the given `size` using the format following the placeholder. Raises `ValueError` if number of `*` is larger than 1. If no `*` in `fmt`, returns `fmt` without checking its size! Examples -------- >>> _replace_star('ii*fi', 40) 'ii7fi'
[ "Replace", "the", "*", "placeholder", "in", "a", "format", "string", "(", "fmt", ")", "so", "that", "struct", ".", "calcsize", "(", "fmt", ")", "is", "equal", "to", "the", "given", "size", "using", "the", "format", "following", "the", "placeholder", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L160-L186
train
darothen/xbpch
xbpch/uff.py
FortranFile._fix
def _fix(self, fmt='i'): """ Read pre- or suffix of line at current position with given format `fmt` (default 'i'). """ fmt = self.endian + fmt fix = self.read(struct.calcsize(fmt)) if fix: return struct.unpack(fmt, fix)[0] else: raise EOFError
python
def _fix(self, fmt='i'): """ Read pre- or suffix of line at current position with given format `fmt` (default 'i'). """ fmt = self.endian + fmt fix = self.read(struct.calcsize(fmt)) if fix: return struct.unpack(fmt, fix)[0] else: raise EOFError
[ "def", "_fix", "(", "self", ",", "fmt", "=", "'i'", ")", ":", "fmt", "=", "self", ".", "endian", "+", "fmt", "fix", "=", "self", ".", "read", "(", "struct", ".", "calcsize", "(", "fmt", ")", ")", "if", "fix", ":", "return", "struct", ".", "unpack", "(", "fmt", ",", "fix", ")", "[", "0", "]", "else", ":", "raise", "EOFError" ]
Read pre- or suffix of line at current position with given format `fmt` (default 'i').
[ "Read", "pre", "-", "or", "suffix", "of", "line", "at", "current", "position", "with", "given", "format", "fmt", "(", "default", "i", ")", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L67-L77
train
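A standalone illustration of the 4-byte record marker the helper reads (pure standard library):

    import io
    import struct

    # A big-endian marker announcing a 12-byte payload, as _fix('i') would read it.
    buf = io.BytesIO(struct.pack('>i', 12))
    print(struct.unpack('>i', buf.read(4))[0])  # -> 12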
darothen/xbpch
xbpch/uff.py
FortranFile.readline
def readline(self, fmt=None): """ Return next unformatted "line". If format is given, unpack content, otherwise return byte string. """ prefix_size = self._fix() if fmt is None: content = self.read(prefix_size) else: fmt = self.endian + fmt fmt = _replace_star(fmt, prefix_size) content = struct.unpack(fmt, self.read(prefix_size)) try: suffix_size = self._fix() except EOFError: # when endian is invalid and prefix_size > total file size suffix_size = -1 if prefix_size != suffix_size: raise IOError(_FIX_ERROR) return content
python
def readline(self, fmt=None): """ Return next unformatted "line". If format is given, unpack content, otherwise return byte string. """ prefix_size = self._fix() if fmt is None: content = self.read(prefix_size) else: fmt = self.endian + fmt fmt = _replace_star(fmt, prefix_size) content = struct.unpack(fmt, self.read(prefix_size)) try: suffix_size = self._fix() except EOFError: # when endian is invalid and prefix_size > total file size suffix_size = -1 if prefix_size != suffix_size: raise IOError(_FIX_ERROR) return content
[ "def", "readline", "(", "self", ",", "fmt", "=", "None", ")", ":", "prefix_size", "=", "self", ".", "_fix", "(", ")", "if", "fmt", "is", "None", ":", "content", "=", "self", ".", "read", "(", "prefix_size", ")", "else", ":", "fmt", "=", "self", ".", "endian", "+", "fmt", "fmt", "=", "_replace_star", "(", "fmt", ",", "prefix_size", ")", "content", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "read", "(", "prefix_size", ")", ")", "try", ":", "suffix_size", "=", "self", ".", "_fix", "(", ")", "except", "EOFError", ":", "# when endian is invalid and prefix_size > total file size", "suffix_size", "=", "-", "1", "if", "prefix_size", "!=", "suffix_size", ":", "raise", "IOError", "(", "_FIX_ERROR", ")", "return", "content" ]
Return next unformatted "line". If format is given, unpack content, otherwise return byte string.
[ "Return", "next", "unformatted", "line", ".", "If", "format", "is", "given", "unpack", "content", "otherwise", "return", "byte", "string", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L79-L102
train
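A read sketch (the path-taking constructor and default endianness are assumptions; check the class):

    from xbpch.uff import FortranFile  # import path taken from the record above

    f = FortranFile('output.uff')   # assumption: file-path constructor
    raw = f.readline()              # no fmt: raw bytes of the next record
    # With a format, contents are unpacked; '*' expands to fit the record size:
    # values = f.readline('ii*f')
    f.close()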
darothen/xbpch
xbpch/uff.py
FortranFile.skipline
def skipline(self): """ Skip the next line and return position and size of line. Raises IOError if pre- and suffix of line do not match. """ position = self.tell() prefix = self._fix() self.seek(prefix, 1) # skip content suffix = self._fix() if prefix != suffix: raise IOError(_FIX_ERROR) return position, prefix
python
def skipline(self): """ Skip the next line and return position and size of line. Raises IOError if pre- and suffix of line do not match. """ position = self.tell() prefix = self._fix() self.seek(prefix, 1) # skip content suffix = self._fix() if prefix != suffix: raise IOError(_FIX_ERROR) return position, prefix
[ "def", "skipline", "(", "self", ")", ":", "position", "=", "self", ".", "tell", "(", ")", "prefix", "=", "self", ".", "_fix", "(", ")", "self", ".", "seek", "(", "prefix", ",", "1", ")", "# skip content", "suffix", "=", "self", ".", "_fix", "(", ")", "if", "prefix", "!=", "suffix", ":", "raise", "IOError", "(", "_FIX_ERROR", ")", "return", "position", ",", "prefix" ]
Skip the next line and return position and size of line. Raises IOError if pre- and suffix of line do not match.
[ "Skip", "the", "next", "line", "and", "returns", "position", "and", "size", "of", "line", ".", "Raises", "IOError", "if", "pre", "-", "and", "suffix", "of", "line", "do", "not", "match", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L110-L123
train
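A companion sketch for skipping records without unpacking them (same constructor assumptions):

    from xbpch.uff import FortranFile

    f = FortranFile('output.uff')
    pos, nbytes = f.skipline()  # start offset and payload size of the skipped record
    print(pos, nbytes)
    f.close()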
darothen/xbpch
xbpch/uff.py
FortranFile.writeline
def writeline(self, fmt, *args): """ Write `line` (list of objects) with given `fmt` to file. The `line` will be chained if object is iterable (except for basestrings). """ fmt = self.endian + fmt size = struct.calcsize(fmt) fix = struct.pack(self.endian + 'i', size) line = struct.pack(fmt, *args) self.write(fix) self.write(line) self.write(fix)
python
def writeline(self, fmt, *args): """ Write `line` (list of objects) with given `fmt` to file. The `line` will be chained if object is iterable (except for basestrings). """ fmt = self.endian + fmt size = struct.calcsize(fmt) fix = struct.pack(self.endian + 'i', size) line = struct.pack(fmt, *args) self.write(fix) self.write(line) self.write(fix)
[ "def", "writeline", "(", "self", ",", "fmt", ",", "*", "args", ")", ":", "fmt", "=", "self", ".", "endian", "+", "fmt", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "fix", "=", "struct", ".", "pack", "(", "self", ".", "endian", "+", "'i'", ",", "size", ")", "line", "=", "struct", ".", "pack", "(", "fmt", ",", "*", "args", ")", "self", ".", "write", "(", "fix", ")", "self", ".", "write", "(", "line", ")", "self", ".", "write", "(", "fix", ")" ]
Write `line` (list of objects) with given `fmt` to file. The `line` will be chained if object is iterable (except for basestrings).
[ "Write", "line", "(", "list", "of", "objects", ")", "with", "given", "fmt", "to", "file", ".", "The", "line", "will", "be", "chained", "if", "object", "is", "iterable", "(", "except", "for", "basestrings", ")", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L125-L139
train
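A write sketch (the open-mode argument is an assumption); each call emits marker + payload + marker:

    from xbpch.uff import FortranFile

    f = FortranFile('scratch.uff', 'wb')    # assumption: mode argument as for open()
    f.writeline('i5sf', 7, b'hello', 3.14)  # one 13-byte payload framed by two markers
    f.close()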
darothen/xbpch
xbpch/uff.py
FortranFile.writelines
def writelines(self, lines, fmt): """ Write `lines` with given `fmt`. """ if isinstance(fmt, basestring): fmt = [fmt] * len(lines) for f, line in zip(fmt, lines): self.writeline(f, line)
python
def writelines(self, lines, fmt): """ Write `lines` with given `fmt`. """ if isinstance(fmt, basestring): fmt = [fmt] * len(lines) for f, line in zip(fmt, lines): self.writeline(f, line)
[ "def", "writelines", "(", "self", ",", "lines", ",", "fmt", ")", ":", "if", "isinstance", "(", "fmt", ",", "basestring", ")", ":", "fmt", "=", "[", "fmt", "]", "*", "len", "(", "lines", ")", "for", "f", ",", "line", "in", "zip", "(", "fmt", ",", "lines", ")", ":", "self", ".", "writeline", "(", "f", ",", "line", ",", "self", ".", "endian", ")" ]
Write `lines` with given `fmt`.
[ "Write", "lines", "with", "given", "format", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L141-L148
train
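And the list form, writing one record per element (same assumptions):

    from xbpch.uff import FortranFile

    f = FortranFile('scratch.uff', 'wb')
    f.writelines([1, 2, 3], 'i')  # three single-int records
    f.close()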
blixt/py-starbound
starbound/sbon.py
read_varint
def read_varint(stream): """Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number. """ value = 0 while True: byte = ord(stream.read(1)) if not byte & 0b10000000: return value << 7 | byte value = value << 7 | (byte & 0b01111111)
python
def read_varint(stream): """Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number. """ value = 0 while True: byte = ord(stream.read(1)) if not byte & 0b10000000: return value << 7 | byte value = value << 7 | (byte & 0b01111111)
[ "def", "read_varint", "(", "stream", ")", ":", "value", "=", "0", "while", "True", ":", "byte", "=", "ord", "(", "stream", ".", "read", "(", "1", ")", ")", "if", "not", "byte", "&", "0b10000000", ":", "return", "value", "<<", "7", "|", "byte", "value", "=", "value", "<<", "7", "|", "(", "byte", "&", "0b01111111", ")" ]
Read while the most significant bit is set, then put the 7 least significant bits of all read bytes together to create a number.
[ "Read", "while", "the", "most", "significant", "bit", "is", "set", "then", "put", "the", "7", "least", "significant", "bits", "of", "all", "read", "bytes", "together", "to", "create", "a", "number", "." ]
68a2f6bfef73d8803191f937c69005a64eeae017
https://github.com/blixt/py-starbound/blob/68a2f6bfef73d8803191f937c69005a64eeae017/starbound/sbon.py#L70-L80
train
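A self-contained check of the decoding above:

    import io
    from starbound.sbon import read_varint

    # 0x82: continuation byte carrying 0b0000010; 0x01: terminal byte.
    stream = io.BytesIO(bytes([0x82, 0x01]))
    print(read_varint(stream))  # -> (2 << 7) | 1 == 257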
darothen/xbpch
xbpch/core.py
open_bpchdataset
def open_bpchdataset(filename, fields=[], categories=[], tracerinfo_file='tracerinfo.dat', diaginfo_file='diaginfo.dat', endian=">", decode_cf=True, memmap=True, dask=True, return_store=False): """ Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata default_dtype : numpy.dtype, optional Default datatype for variables encoded in file on disk (single-precision float by default). memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk """ store = BPCHDataStore( filename, fields=fields, categories=categories, tracerinfo_file=tracerinfo_file, diaginfo_file=diaginfo_file, endian=endian, use_mmap=memmap, dask_delayed=dask ) ds = xr.Dataset.load_store(store) # Record what the file object underlying the store which we culled this # Dataset from is so that we can clean it up later ds._file_obj = store._bpch # Handle CF corrections if decode_cf: decoded_vars = OrderedDict() rename_dict = {} for v in ds.variables: cf_name = cf.get_valid_varname(v) rename_dict[v] = cf_name new_var = cf.enforce_cf_variable(ds[v]) decoded_vars[cf_name] = new_var ds = xr.Dataset(decoded_vars, attrs=ds.attrs.copy()) # ds.rename(rename_dict, inplace=True) # TODO: There's a bug with xr.decode_cf which eagerly loads data. # Re-enable this once that bug is fixed # Note that we do not need to decode the times because we explicitly # kept track of them as we parsed the data. # ds = xr.decode_cf(ds, decode_times=False) # Set attributes for CF conventions ts = get_timestamp() ds.attrs.update(dict( Conventions='CF1.6', source=filename, tracerinfo=tracerinfo_file, diaginfo=diaginfo_file, filetype=store._bpch.filetype, filetitle=store._bpch.filetitle, history=( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, filename) ), )) # To immediately load the data from the BPCHDataProxy paylods, need # to execute ds.data_vars for some reason... if return_store: return ds, store else: return ds
python
def open_bpchdataset(filename, fields=[], categories=[], tracerinfo_file='tracerinfo.dat', diaginfo_file='diaginfo.dat', endian=">", decode_cf=True, memmap=True, dask=True, return_store=False): """ Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata default_dtype : numpy.dtype, optional Default datatype for variables encoded in file on disk (single-precision float by default). memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk """ store = BPCHDataStore( filename, fields=fields, categories=categories, tracerinfo_file=tracerinfo_file, diaginfo_file=diaginfo_file, endian=endian, use_mmap=memmap, dask_delayed=dask ) ds = xr.Dataset.load_store(store) # Record what the file object underlying the store which we culled this # Dataset from is so that we can clean it up later ds._file_obj = store._bpch # Handle CF corrections if decode_cf: decoded_vars = OrderedDict() rename_dict = {} for v in ds.variables: cf_name = cf.get_valid_varname(v) rename_dict[v] = cf_name new_var = cf.enforce_cf_variable(ds[v]) decoded_vars[cf_name] = new_var ds = xr.Dataset(decoded_vars, attrs=ds.attrs.copy()) # ds.rename(rename_dict, inplace=True) # TODO: There's a bug with xr.decode_cf which eagerly loads data. # Re-enable this once that bug is fixed # Note that we do not need to decode the times because we explicitly # kept track of them as we parsed the data. # ds = xr.decode_cf(ds, decode_times=False) # Set attributes for CF conventions ts = get_timestamp() ds.attrs.update(dict( Conventions='CF1.6', source=filename, tracerinfo=tracerinfo_file, diaginfo=diaginfo_file, filetype=store._bpch.filetype, filetitle=store._bpch.filetitle, history=( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, filename) ), )) # To immediately load the data from the BPCHDataProxy paylods, need # to execute ds.data_vars for some reason... if return_store: return ds, store else: return ds
[ "def", "open_bpchdataset", "(", "filename", ",", "fields", "=", "[", "]", ",", "categories", "=", "[", "]", ",", "tracerinfo_file", "=", "'tracerinfo.dat'", ",", "diaginfo_file", "=", "'diaginfo.dat'", ",", "endian", "=", "\">\"", ",", "decode_cf", "=", "True", ",", "memmap", "=", "True", ",", "dask", "=", "True", ",", "return_store", "=", "False", ")", ":", "store", "=", "BPCHDataStore", "(", "filename", ",", "fields", "=", "fields", ",", "categories", "=", "categories", ",", "tracerinfo_file", "=", "tracerinfo_file", ",", "diaginfo_file", "=", "diaginfo_file", ",", "endian", "=", "endian", ",", "use_mmap", "=", "memmap", ",", "dask_delayed", "=", "dask", ")", "ds", "=", "xr", ".", "Dataset", ".", "load_store", "(", "store", ")", "# Record what the file object underlying the store which we culled this", "# Dataset from is so that we can clean it up later", "ds", ".", "_file_obj", "=", "store", ".", "_bpch", "# Handle CF corrections", "if", "decode_cf", ":", "decoded_vars", "=", "OrderedDict", "(", ")", "rename_dict", "=", "{", "}", "for", "v", "in", "ds", ".", "variables", ":", "cf_name", "=", "cf", ".", "get_valid_varname", "(", "v", ")", "rename_dict", "[", "v", "]", "=", "cf_name", "new_var", "=", "cf", ".", "enforce_cf_variable", "(", "ds", "[", "v", "]", ")", "decoded_vars", "[", "cf_name", "]", "=", "new_var", "ds", "=", "xr", ".", "Dataset", "(", "decoded_vars", ",", "attrs", "=", "ds", ".", "attrs", ".", "copy", "(", ")", ")", "# ds.rename(rename_dict, inplace=True)", "# TODO: There's a bug with xr.decode_cf which eagerly loads data.", "# Re-enable this once that bug is fixed", "# Note that we do not need to decode the times because we explicitly", "# kept track of them as we parsed the data.", "# ds = xr.decode_cf(ds, decode_times=False)", "# Set attributes for CF conventions", "ts", "=", "get_timestamp", "(", ")", "ds", ".", "attrs", ".", "update", "(", "dict", "(", "Conventions", "=", "'CF1.6'", ",", "source", "=", "filename", ",", "tracerinfo", "=", "tracerinfo_file", ",", "diaginfo", "=", "diaginfo_file", ",", "filetype", "=", "store", ".", "_bpch", ".", "filetype", ",", "filetitle", "=", "store", ".", "_bpch", ".", "filetitle", ",", "history", "=", "(", "\"{}: Processed/loaded by xbpch-{} from {}\"", ".", "format", "(", "ts", ",", "ver", ",", "filename", ")", ")", ",", ")", ")", "# To immediately load the data from the BPCHDataProxy paylods, need", "# to execute ds.data_vars for some reason...", "if", "return_store", ":", "return", "ds", ",", "store", "else", ":", "return", "ds" ]
Open a GEOS-Chem BPCH file output as an xarray Dataset. Parameters ---------- filename : string Path to the output file to read in. {tracerinfo,diaginfo}_file : string, optional Path to the metadata "info" .dat files which are used to decipher the metadata corresponding to each variable in the output dataset. If not provided, will look for them in the current directory or fall back on a generic set. fields : list, optional List of a subset of variable names to return. This can substantially improve read performance. Note that the field here is just the tracer name - not the category, e.g. 'O3' instead of 'IJ-AVG-$_O3'. categories : list, optional List a subset of variable categories to look through. This can substantially improve read performance. endian : {'=', '>', '<'}, optional Endianness of file on disk. By default, "big endian" (">") is assumed. decode_cf : bool Enforce CF conventions for variable names, units, and other metadata default_dtype : numpy.dtype, optional Default datatype for variables encoded in file on disk (single-precision float by default). memmap : bool Flag indicating that data should be memory-mapped from disk instead of eagerly loaded into memory dask : bool Flag indicating that data reading should be deferred (delayed) to construct a task-graph for later execution return_store : bool Also return the underlying DataStore to the user Returns ------- ds : xarray.Dataset Dataset containing the requested fields (or the entire file), with data contained in proxy containers for access later. store : xarray.AbstractDataStore Underlying DataStore which handles the loading and processing of bpch files on disk
[ "Open", "a", "GEOS", "-", "Chem", "BPCH", "file", "output", "as", "an", "xarray", "Dataset", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/core.py#L26-L126
train
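A minimal usage sketch for the record above. The file names are hypothetical placeholders, and the top-level import assumes the function is re-exported as xbpch.open_bpchdataset (consistent with the module path xbpch/core.py shown above).

import xbpch  # assumed package-level re-export of open_bpchdataset

# Hypothetical GEOS-Chem output and metadata files -- substitute your own.
ds = xbpch.open_bpchdataset(
    "ND49_output.bpch",
    tracerinfo_file="tracerinfo.dat",
    diaginfo_file="diaginfo.dat",
    fields=["O3"],  # restricting to one tracer can substantially speed up reads
    dask=True,      # defer I/O into a dask task graph
)
print(ds)  # variables stay in proxy containers until their data is accessed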
darothen/xbpch
xbpch/core.py
open_mfbpchdataset
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts', preprocess=None, lock=None, **kwargs): """ Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. """ from xarray.backends.api import _MultiFileCloser # TODO: Include file locks? # Check for dask dask = kwargs.pop('dask', False) if not dask: raise ValueError("Reading multiple files without dask is not supported") kwargs['dask'] = True # Add th if isinstance(paths, basestring): paths = sorted(glob(paths)) if not paths: raise IOError("No paths to files were passed into open_mfbpchdataset") datasets = [open_bpchdataset(filename, **kwargs) for filename in paths] bpch_objs = [ds._file_obj for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] # Concatenate over time combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim) combined._file_obj = _MultiFileCloser(bpch_objs) combined.attrs = datasets[0].attrs ts = get_timestamp() fns_str = " ".join(paths) combined.attrs['history'] = ( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, fns_str) ) return combined
python
def open_mfbpchdataset(paths, concat_dim='time', compat='no_conflicts', preprocess=None, lock=None, **kwargs): """ Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`. """ from xarray.backends.api import _MultiFileCloser # TODO: Include file locks? # Check for dask dask = kwargs.pop('dask', False) if not dask: raise ValueError("Reading multiple files without dask is not supported") kwargs['dask'] = True # Add th if isinstance(paths, basestring): paths = sorted(glob(paths)) if not paths: raise IOError("No paths to files were passed into open_mfbpchdataset") datasets = [open_bpchdataset(filename, **kwargs) for filename in paths] bpch_objs = [ds._file_obj for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] # Concatenate over time combined = xr.auto_combine(datasets, compat=compat, concat_dim=concat_dim) combined._file_obj = _MultiFileCloser(bpch_objs) combined.attrs = datasets[0].attrs ts = get_timestamp() fns_str = " ".join(paths) combined.attrs['history'] = ( "{}: Processed/loaded by xbpch-{} from {}" .format(ts, ver, fns_str) ) return combined
[ "def", "open_mfbpchdataset", "(", "paths", ",", "concat_dim", "=", "'time'", ",", "compat", "=", "'no_conflicts'", ",", "preprocess", "=", "None", ",", "lock", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "xarray", ".", "backends", ".", "api", "import", "_MultiFileCloser", "# TODO: Include file locks?", "# Check for dask", "dask", "=", "kwargs", ".", "pop", "(", "'dask'", ",", "False", ")", "if", "not", "dask", ":", "raise", "ValueError", "(", "\"Reading multiple files without dask is not supported\"", ")", "kwargs", "[", "'dask'", "]", "=", "True", "# Add th", "if", "isinstance", "(", "paths", ",", "basestring", ")", ":", "paths", "=", "sorted", "(", "glob", "(", "paths", ")", ")", "if", "not", "paths", ":", "raise", "IOError", "(", "\"No paths to files were passed into open_mfbpchdataset\"", ")", "datasets", "=", "[", "open_bpchdataset", "(", "filename", ",", "*", "*", "kwargs", ")", "for", "filename", "in", "paths", "]", "bpch_objs", "=", "[", "ds", ".", "_file_obj", "for", "ds", "in", "datasets", "]", "if", "preprocess", "is", "not", "None", ":", "datasets", "=", "[", "preprocess", "(", "ds", ")", "for", "ds", "in", "datasets", "]", "# Concatenate over time", "combined", "=", "xr", ".", "auto_combine", "(", "datasets", ",", "compat", "=", "compat", ",", "concat_dim", "=", "concat_dim", ")", "combined", ".", "_file_obj", "=", "_MultiFileCloser", "(", "bpch_objs", ")", "combined", ".", "attrs", "=", "datasets", "[", "0", "]", ".", "attrs", "ts", "=", "get_timestamp", "(", ")", "fns_str", "=", "\" \"", ".", "join", "(", "paths", ")", "combined", ".", "attrs", "[", "'history'", "]", "=", "(", "\"{}: Processed/loaded by xbpch-{} from {}\"", ".", "format", "(", "ts", ",", "ver", ",", "fns_str", ")", ")", "return", "combined" ]
Open multiple bpch files as a single dataset. You must have dask installed for this to work, as this greatly simplifies issues relating to multi-file I/O. Also, please note that this is not a very performant routine. I/O is still limited by the fact that we need to manually scan/read through each bpch file so that we can figure out what its contents are, since that metadata isn't saved anywhere. So this routine will actually sequentially load Datasets for each bpch file, then concatenate them along the "time" axis. You may wish to simply process each file individually, coerce to NetCDF, and then ingest through xarray as normal. Parameters ---------- paths : list of strs Filenames to load; order doesn't matter as they will be lexicographically sorted before we read in the data concat_dim : str, default='time' Dimension to concatenate Datasets over. We default to "time" since this is how GEOS-Chem splits output files compat : str (optional) String indicating how to compare variables of the same name for potential conflicts when merging: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable (optional) A pre-processing function to apply to each Dataset prior to concatenation lock : False, True, or threading.Lock (optional) Passed to :py:func:`dask.array.from_array`. By default, xarray employs a per-variable lock when reading data from NetCDF files, but this model has not yet been extended or implemented for bpch files and so this is not actually used. However, it is likely necessary before dask's multi-threaded backend can be used **kwargs : optional Additional arguments to pass to :py:func:`xbpch.open_bpchdataset`.
[ "Open", "multiple", "bpch", "files", "as", "a", "single", "dataset", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/core.py#L129-L215
train
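A sketch of multi-file reading with the function above; the glob pattern is a placeholder, and the import is assumed as in the previous sketch. Note that the function raises ValueError unless dask=True is passed through.

from glob import glob

import xbpch  # assumed package-level re-export of open_mfbpchdataset

paths = sorted(glob("ctm.bpch.*"))  # hypothetical file pattern
ds = xbpch.open_mfbpchdataset(
    paths,
    concat_dim="time",  # GEOS-Chem splits its output along the time axis
    dask=True,          # required: multi-file reads without dask raise ValueError
)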
bitlabstudio/django-multilingual-news
multilingual_news/south_migrations/0006_migrate_placeholder_fields.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for entry in orm['multilingual_news.NewsEntry'].objects.all(): self.migrate_placeholder( orm, entry, 'excerpt', 'multilingual_news_excerpt', 'excerpt') self.migrate_placeholder( orm, entry, 'content', 'multilingual_news_content', 'content')
python
def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." for entry in orm['multilingual_news.NewsEntry'].objects.all(): self.migrate_placeholder( orm, entry, 'excerpt', 'multilingual_news_excerpt', 'excerpt') self.migrate_placeholder( orm, entry, 'content', 'multilingual_news_content', 'content')
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "# Note: Remember to use orm['appname.ModelName'] rather than \"from appname.models...\"", "for", "entry", "in", "orm", "[", "'multilingual_news.NewsEntry'", "]", ".", "objects", ".", "all", "(", ")", ":", "self", ".", "migrate_placeholder", "(", "orm", ",", "entry", ",", "'excerpt'", ",", "'multilingual_news_excerpt'", ",", "'excerpt'", ")", "self", ".", "migrate_placeholder", "(", "orm", ",", "entry", ",", "'content'", ",", "'multilingual_news_content'", ",", "'content'", ")" ]
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
2ddc076ce2002a9fa462dbba701441879b49a54d
https://github.com/bitlabstudio/django-multilingual-news/blob/2ddc076ce2002a9fa462dbba701441879b49a54d/multilingual_news/south_migrations/0006_migrate_placeholder_fields.py#L35-L42
train
asmeurer/iterm2-tools
iterm2_tools/images.py
image_bytes
def image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ if preserve_aspect_ratio is None: if width != 'auto' and height != 'auto': preserve_aspect_ratio = False else: preserve_aspect_ratio = True data = { 'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'), 'inline': inline, 'size': len(b), 'base64_img': base64.b64encode(b).decode('ascii'), 'width': width, 'height': height, 'preserve_aspect_ratio': int(preserve_aspect_ratio), } # IMAGE_CODE is a string because bytes doesn't support formatting return IMAGE_CODE.format(**data).encode('ascii')
python
def image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ if preserve_aspect_ratio is None: if width != 'auto' and height != 'auto': preserve_aspect_ratio = False else: preserve_aspect_ratio = True data = { 'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'), 'inline': inline, 'size': len(b), 'base64_img': base64.b64encode(b).decode('ascii'), 'width': width, 'height': height, 'preserve_aspect_ratio': int(preserve_aspect_ratio), } # IMAGE_CODE is a string because bytes doesn't support formatting return IMAGE_CODE.format(**data).encode('ascii')
[ "def", "image_bytes", "(", "b", ",", "filename", "=", "None", ",", "inline", "=", "1", ",", "width", "=", "'auto'", ",", "height", "=", "'auto'", ",", "preserve_aspect_ratio", "=", "None", ")", ":", "if", "preserve_aspect_ratio", "is", "None", ":", "if", "width", "!=", "'auto'", "and", "height", "!=", "'auto'", ":", "preserve_aspect_ratio", "=", "False", "else", ":", "preserve_aspect_ratio", "=", "True", "data", "=", "{", "'name'", ":", "base64", ".", "b64encode", "(", "(", "filename", "or", "'Unnamed file'", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "decode", "(", "'ascii'", ")", ",", "'inline'", ":", "inline", ",", "'size'", ":", "len", "(", "b", ")", ",", "'base64_img'", ":", "base64", ".", "b64encode", "(", "b", ")", ".", "decode", "(", "'ascii'", ")", ",", "'width'", ":", "width", ",", "'height'", ":", "height", ",", "'preserve_aspect_ratio'", ":", "int", "(", "preserve_aspect_ratio", ")", ",", "}", "# IMAGE_CODE is a string because bytes doesn't support formatting", "return", "IMAGE_CODE", ".", "format", "(", "*", "*", "data", ")", ".", "encode", "(", "'ascii'", ")" ]
Return a bytes string that displays image given by bytes b in the terminal If filename=None, the filename defaults to "Unnamed file" width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
[ "Return", "a", "bytes", "string", "that", "displays", "image", "given", "by", "bytes", "b", "in", "the", "terminal" ]
97b1b593bb02884521c2c05ed414f178de0b934e
https://github.com/asmeurer/iterm2-tools/blob/97b1b593bb02884521c2c05ed414f178de0b934e/iterm2_tools/images.py#L15-L54
train
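A small sketch of calling the function above; the PNG path is a hypothetical placeholder, and the module path is taken from the record (iterm2_tools/images.py).

import sys

from iterm2_tools.images import image_bytes

# Read raw image bytes and build the iTerm2 inline-image escape sequence.
with open("plot.png", "rb") as f:  # hypothetical file
    payload = image_bytes(f.read(), filename="plot.png", width="50%")
sys.stdout.buffer.write(payload)  # only renders as an image inside iTerm2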
asmeurer/iterm2-tools
iterm2_tools/images.py
display_image_bytes
def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Display the image given by the bytes b in the terminal. If filename=None the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio)) sys.stdout.write('\n')
python
def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto', preserve_aspect_ratio=None): """ Display the image given by the bytes b in the terminal. If filename=None the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio)) sys.stdout.write('\n')
[ "def", "display_image_bytes", "(", "b", ",", "filename", "=", "None", ",", "inline", "=", "1", ",", "width", "=", "'auto'", ",", "height", "=", "'auto'", ",", "preserve_aspect_ratio", "=", "None", ")", ":", "sys", ".", "stdout", ".", "buffer", ".", "write", "(", "image_bytes", "(", "b", ",", "filename", "=", "filename", ",", "inline", "=", "inline", ",", "width", "=", "width", ",", "height", "=", "height", ",", "preserve_aspect_ratio", "=", "preserve_aspect_ratio", ")", ")", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")" ]
Display the image given by the bytes b in the terminal. If filename=None the filename defaults to "Unnamed file". width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
[ "Display", "the", "image", "given", "by", "the", "bytes", "b", "in", "the", "terminal", "." ]
97b1b593bb02884521c2c05ed414f178de0b934e
https://github.com/asmeurer/iterm2-tools/blob/97b1b593bb02884521c2c05ed414f178de0b934e/iterm2_tools/images.py#L56-L83
train
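The convenience wrapper above writes the escape sequence and a trailing newline for you; a short sketch with a placeholder file:

from iterm2_tools.images import display_image_bytes

with open("plot.png", "rb") as f:  # hypothetical file
    # Height in character cells; aspect ratio is preserved since width stays 'auto'.
    display_image_bytes(f.read(), filename="plot.png", height="20")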
asmeurer/iterm2-tools
iterm2_tools/images.py
display_image_file
def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None): """ Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f: sys.stdout.buffer.write(image_bytes(f.read(), filename=fn, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio))
python
def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None): """ Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html """ with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f: sys.stdout.buffer.write(image_bytes(f.read(), filename=fn, width=width, height=height, preserve_aspect_ratio=preserve_aspect_ratio))
[ "def", "display_image_file", "(", "fn", ",", "width", "=", "'auto'", ",", "height", "=", "'auto'", ",", "preserve_aspect_ratio", "=", "None", ")", ":", "with", "open", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "fn", ")", ")", ",", "'rb'", ")", "as", "f", ":", "sys", ".", "stdout", ".", "buffer", ".", "write", "(", "image_bytes", "(", "f", ".", "read", "(", ")", ",", "filename", "=", "fn", ",", "width", "=", "width", ",", "height", "=", "height", ",", "preserve_aspect_ratio", "=", "preserve_aspect_ratio", ")", ")" ]
Display an image in the terminal. A newline is not printed. width and height are strings, following the format N: N character cells. Npx: N pixels. N%: N percent of the session's width or height. 'auto': The image's inherent size will be used to determine an appropriate dimension. preserve_aspect_ratio sets whether the aspect ratio of the image is preserved. The default (None) is True unless both width and height are set. See https://www.iterm2.com/documentation-images.html
[ "Display", "an", "image", "in", "the", "terminal", "." ]
97b1b593bb02884521c2c05ed414f178de0b934e
https://github.com/asmeurer/iterm2-tools/blob/97b1b593bb02884521c2c05ed414f178de0b934e/iterm2_tools/images.py#L85-L112
train
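The file-path variant handles path expansion itself; the path below is a placeholder.

from iterm2_tools.images import display_image_file

# '~' is expanded and symlinks are resolved inside the function; no newline is printed.
display_image_file("~/Pictures/plot.png", width="300px")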
xolox/python-qpass
setup.py
get_requirements
def get_requirements(*args): """Get requirements from pip requirement files.""" requirements = set() contents = get_contents(*args) for line in contents.splitlines(): # Strip comments. line = re.sub(r'^#.*|\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\s+', '', line)) return sorted(requirements)
python
def get_requirements(*args): """Get requirements from pip requirement files.""" requirements = set() contents = get_contents(*args) for line in contents.splitlines(): # Strip comments. line = re.sub(r'^#.*|\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\s+', '', line)) return sorted(requirements)
[ "def", "get_requirements", "(", "*", "args", ")", ":", "requirements", "=", "set", "(", ")", "contents", "=", "get_contents", "(", "*", "args", ")", "for", "line", "in", "contents", ".", "splitlines", "(", ")", ":", "# Strip comments.", "line", "=", "re", ".", "sub", "(", "r'^#.*|\\s#.*'", ",", "''", ",", "line", ")", "# Ignore empty lines", "if", "line", "and", "not", "line", ".", "isspace", "(", ")", ":", "requirements", ".", "add", "(", "re", ".", "sub", "(", "r'\\s+'", ",", "''", ",", "line", ")", ")", "return", "sorted", "(", "requirements", ")" ]
Get requirements from pip requirement files.
[ "Get", "requirements", "from", "pip", "requirement", "files", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/setup.py#L44-L54
train
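A sketch of how this helper is typically wired into setup(); get_requirements() and the get_contents() helper it calls live in the same setup.py, so no import of them is needed there. The package name is illustrative only.

from setuptools import setup

setup(
    name="qpass",  # illustrative; the real setup.py defines many more keywords
    # Comments and whitespace are stripped and the result is sorted.
    install_requires=get_requirements("requirements.txt"),
)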
ministryofjustice/govuk-bank-holidays
govuk_bank_holidays/bank_holidays.py
BankHolidays.get_holidays
def get_holidays(self, division=None, year=None): """ Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc """ if division: holidays = self.data[division] else: holidays = self.data[self.ENGLAND_AND_WALES] dates_in_common = six.moves.reduce( set.intersection, (set(map(lambda holiday: holiday['date'], division_holidays)) for division, division_holidays in six.iteritems(self.data)) ) holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays) if year: holidays = filter(lambda holiday: holiday['date'].year == year, holidays) return list(holidays)
python
def get_holidays(self, division=None, year=None): """ Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc """ if division: holidays = self.data[division] else: holidays = self.data[self.ENGLAND_AND_WALES] dates_in_common = six.moves.reduce( set.intersection, (set(map(lambda holiday: holiday['date'], division_holidays)) for division, division_holidays in six.iteritems(self.data)) ) holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays) if year: holidays = filter(lambda holiday: holiday['date'].year == year, holidays) return list(holidays)
[ "def", "get_holidays", "(", "self", ",", "division", "=", "None", ",", "year", "=", "None", ")", ":", "if", "division", ":", "holidays", "=", "self", ".", "data", "[", "division", "]", "else", ":", "holidays", "=", "self", ".", "data", "[", "self", ".", "ENGLAND_AND_WALES", "]", "dates_in_common", "=", "six", ".", "moves", ".", "reduce", "(", "set", ".", "intersection", ",", "(", "set", "(", "map", "(", "lambda", "holiday", ":", "holiday", "[", "'date'", "]", ",", "division_holidays", ")", ")", "for", "division", ",", "division_holidays", "in", "six", ".", "iteritems", "(", "self", ".", "data", ")", ")", ")", "holidays", "=", "filter", "(", "lambda", "holiday", ":", "holiday", "[", "'date'", "]", "in", "dates_in_common", ",", "holidays", ")", "if", "year", ":", "holidays", "=", "filter", "(", "lambda", "holiday", ":", "holiday", "[", "'date'", "]", ".", "year", "==", "year", ",", "holidays", ")", "return", "list", "(", "holidays", ")" ]
Gets a list of all known bank holidays, optionally filtered by division and/or year :param division: see division constants; defaults to common holidays :param year: defaults to all available years :return: list of dicts with titles, dates, etc
[ "Gets", "a", "list", "of", "all", "known", "bank", "holidays", "optionally", "filtered", "by", "division", "and", "/", "or", "year", ":", "param", "division", ":", "see", "division", "constants", ";", "defaults", "to", "common", "holidays", ":", "param", "year", ":", "defaults", "to", "all", "available", "years", ":", "return", ":", "list", "of", "dicts", "with", "titles", "dates", "etc" ]
76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67
https://github.com/ministryofjustice/govuk-bank-holidays/blob/76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67/govuk_bank_holidays/bank_holidays.py#L87-L106
train
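A short usage sketch; instantiating BankHolidays with no arguments is assumed to load the library's bundled data, and the year value is illustrative. ENGLAND_AND_WALES is the one division constant visible in the code above.

from govuk_bank_holidays.bank_holidays import BankHolidays

holidays = BankHolidays()
# Holidays common to all divisions, limited to one year.
for holiday in holidays.get_holidays(year=2019):
    print(holiday['date'], holiday['title'])
# Or restrict to a single division via the class constants:
england = holidays.get_holidays(division=BankHolidays.ENGLAND_AND_WALES)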
ministryofjustice/govuk-bank-holidays
govuk_bank_holidays/bank_holidays.py
BankHolidays.get_next_holiday
def get_next_holiday(self, division=None, date=None): """ Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict """ date = date or datetime.date.today() for holiday in self.get_holidays(division=division): if holiday['date'] > date: return holiday
python
def get_next_holiday(self, division=None, date=None): """ Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict """ date = date or datetime.date.today() for holiday in self.get_holidays(division=division): if holiday['date'] > date: return holiday
[ "def", "get_next_holiday", "(", "self", ",", "division", "=", "None", ",", "date", "=", "None", ")", ":", "date", "=", "date", "or", "datetime", ".", "date", ".", "today", "(", ")", "for", "holiday", "in", "self", ".", "get_holidays", "(", "division", "=", "division", ")", ":", "if", "holiday", "[", "'date'", "]", ">", "date", ":", "return", "holiday" ]
Returns the next known bank holiday :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: dict
[ "Returns", "the", "next", "known", "bank", "holiday", ":", "param", "division", ":", "see", "division", "constants", ";", "defaults", "to", "common", "holidays", ":", "param", "date", ":", "search", "starting", "from", "this", "date", ";", "defaults", "to", "today", ":", "return", ":", "dict" ]
76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67
https://github.com/ministryofjustice/govuk-bank-holidays/blob/76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67/govuk_bank_holidays/bank_holidays.py#L108-L118
train
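A sketch of looking ahead from a fixed date (the start date is illustrative):

import datetime

from govuk_bank_holidays.bank_holidays import BankHolidays

holidays = BankHolidays()
nxt = holidays.get_next_holiday(date=datetime.date(2019, 1, 1))
if nxt is not None:  # None once the underlying data has no later holidays
    print(nxt['title'], 'falls on', nxt['date'])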
ministryofjustice/govuk-bank-holidays
govuk_bank_holidays/bank_holidays.py
BankHolidays.is_holiday
def is_holiday(self, date, division=None): """ True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date in (holiday['date'] for holiday in self.get_holidays(division=division))
python
def is_holiday(self, date, division=None): """ True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date in (holiday['date'] for holiday in self.get_holidays(division=division))
[ "def", "is_holiday", "(", "self", ",", "date", ",", "division", "=", "None", ")", ":", "return", "date", "in", "(", "holiday", "[", "'date'", "]", "for", "holiday", "in", "self", ".", "get_holidays", "(", "division", "=", "division", ")", ")" ]
True if the date is a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool
[ "True", "if", "the", "date", "is", "a", "known", "bank", "holiday", ":", "param", "date", ":", "the", "date", "to", "check", ":", "param", "division", ":", "see", "division", "constants", ";", "defaults", "to", "common", "holidays", ":", "return", ":", "bool" ]
76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67
https://github.com/ministryofjustice/govuk-bank-holidays/blob/76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67/govuk_bank_holidays/bank_holidays.py#L120-L127
train
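A one-line check against the common holiday list:

import datetime

from govuk_bank_holidays.bank_holidays import BankHolidays

holidays = BankHolidays()
# Christmas Day is a bank holiday in every division, so the common list includes it.
print(holidays.is_holiday(datetime.date(2019, 12, 25)))  # True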
ministryofjustice/govuk-bank-holidays
govuk_bank_holidays/bank_holidays.py
BankHolidays.get_next_work_day
def get_next_work_day(self, division=None, date=None): """ Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict """ date = date or datetime.date.today() one_day = datetime.timedelta(days=1) holidays = set(holiday['date'] for holiday in self.get_holidays(division=division)) while True: date += one_day if date.weekday() not in self.weekend and date not in holidays: return date
python
def get_next_work_day(self, division=None, date=None): """ Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict """ date = date or datetime.date.today() one_day = datetime.timedelta(days=1) holidays = set(holiday['date'] for holiday in self.get_holidays(division=division)) while True: date += one_day if date.weekday() not in self.weekend and date not in holidays: return date
[ "def", "get_next_work_day", "(", "self", ",", "division", "=", "None", ",", "date", "=", "None", ")", ":", "date", "=", "date", "or", "datetime", ".", "date", ".", "today", "(", ")", "one_day", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "holidays", "=", "set", "(", "holiday", "[", "'date'", "]", "for", "holiday", "in", "self", ".", "get_holidays", "(", "division", "=", "division", ")", ")", "while", "True", ":", "date", "+=", "one_day", "if", "date", ".", "weekday", "(", ")", "not", "in", "self", ".", "weekend", "and", "date", "not", "in", "holidays", ":", "return", "date" ]
Returns the next work day, skipping weekends and bank holidays :param division: see division constants; defaults to common holidays :param date: search starting from this date; defaults to today :return: datetime.date; NB: get_next_holiday returns a dict
[ "Returns", "the", "next", "work", "day", "skipping", "weekends", "and", "bank", "holidays", ":", "param", "division", ":", "see", "division", "constants", ";", "defaults", "to", "common", "holidays", ":", "param", "date", ":", "search", "starting", "from", "this", "date", ";", "defaults", "to", "today", ":", "return", ":", "datetime", ".", "date", ";", "NB", ":", "get_next_holiday", "returns", "a", "dict" ]
76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67
https://github.com/ministryofjustice/govuk-bank-holidays/blob/76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67/govuk_bank_holidays/bank_holidays.py#L129-L142
train
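A sketch showing how both the weekend and a holiday are skipped:

import datetime

from govuk_bank_holidays.bank_holidays import BankHolidays

holidays = BankHolidays()
# Friday 3 May 2019: the weekend is skipped and so is the early-May bank
# holiday on Monday 6 May, giving Tuesday 7 May as the next work day.
print(holidays.get_next_work_day(date=datetime.date(2019, 5, 3)))  # 2019-05-07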
ministryofjustice/govuk-bank-holidays
govuk_bank_holidays/bank_holidays.py
BankHolidays.is_work_day
def is_work_day(self, date, division=None): """ True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date.weekday() not in self.weekend and date not in ( holiday['date'] for holiday in self.get_holidays(division=division) )
python
def is_work_day(self, date, division=None): """ True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool """ return date.weekday() not in self.weekend and date not in ( holiday['date'] for holiday in self.get_holidays(division=division) )
[ "def", "is_work_day", "(", "self", ",", "date", ",", "division", "=", "None", ")", ":", "return", "date", ".", "weekday", "(", ")", "not", "in", "self", ".", "weekend", "and", "date", "not", "in", "(", "holiday", "[", "'date'", "]", "for", "holiday", "in", "self", ".", "get_holidays", "(", "division", "=", "division", ")", ")" ]
True if the date is not a weekend or a known bank holiday :param date: the date to check :param division: see division constants; defaults to common holidays :return: bool
[ "True", "if", "the", "date", "is", "not", "a", "weekend", "or", "a", "known", "bank", "holiday", ":", "param", "date", ":", "the", "date", "to", "check", ":", "param", "division", ":", "see", "division", "constants", ";", "defaults", "to", "common", "holidays", ":", "return", ":", "bool" ]
76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67
https://github.com/ministryofjustice/govuk-bank-holidays/blob/76c2f0e29890cee71f0ab99fe1a89cc92c7c4a67/govuk_bank_holidays/bank_holidays.py#L144-L153
train
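The complementary check, using the same 2019 early-May dates:

import datetime

from govuk_bank_holidays.bank_holidays import BankHolidays

holidays = BankHolidays()
print(holidays.is_work_day(datetime.date(2019, 5, 6)))  # False: early-May bank holiday
print(holidays.is_work_day(datetime.date(2019, 5, 7)))  # True: an ordinary Tuesday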
blixt/py-starbound
starbound/__init__.py
World.get_all_regions_with_tiles
def get_all_regions_with_tiles(self): """ Generator which yields a set of (rx, ry) tuples which describe all regions for which the world has tile data """ for key in self.get_all_keys(): (layer, rx, ry) = struct.unpack('>BHH', key) if layer == 1: yield (rx, ry)
python
def get_all_regions_with_tiles(self): """ Generator which yields a set of (rx, ry) tuples which describe all regions for which the world has tile data """ for key in self.get_all_keys(): (layer, rx, ry) = struct.unpack('>BHH', key) if layer == 1: yield (rx, ry)
[ "def", "get_all_regions_with_tiles", "(", "self", ")", ":", "for", "key", "in", "self", ".", "get_all_keys", "(", ")", ":", "(", "layer", ",", "rx", ",", "ry", ")", "=", "struct", ".", "unpack", "(", "'>BHH'", ",", "key", ")", "if", "layer", "==", "1", ":", "yield", "(", "rx", ",", "ry", ")" ]
Generator which yields a set of (rx, ry) tuples which describe all regions for which the world has tile data
[ "Generator", "which", "yields", "a", "set", "of", "(", "rx", "ry", ")", "tuples", "which", "describe", "all", "regions", "for", "which", "the", "world", "has", "tile", "data" ]
68a2f6bfef73d8803191f937c69005a64eeae017
https://github.com/blixt/py-starbound/blob/68a2f6bfef73d8803191f937c69005a64eeae017/starbound/__init__.py#L95-L103
train
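A sketch of iterating the tiled regions. Constructing a World from an open binary file handle follows the project's README as I understand it, and the .world path is a placeholder; depending on the library version, metadata may need to be read before queries (an assumption, noted in the comment).

import starbound

with open('universe/example.world', 'rb') as fh:  # hypothetical world file
    world = starbound.World(fh)
    # Some versions may require reading metadata/header first (assumption).
    for rx, ry in world.get_all_regions_with_tiles():
        print('tile data in region', rx, ry)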
blixt/py-starbound
starbound/__init__.py
World.get_entity_uuid_coords
def get_entity_uuid_coords(self, uuid): """ Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found. """ if uuid in self._entity_to_region_map: coords = self._entity_to_region_map[uuid] entities = self.get_entities(*coords) for entity in entities: if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid: return tuple(entity.data['tilePosition']) return None
python
def get_entity_uuid_coords(self, uuid): """ Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found. """ if uuid in self._entity_to_region_map: coords = self._entity_to_region_map[uuid] entities = self.get_entities(*coords) for entity in entities: if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid: return tuple(entity.data['tilePosition']) return None
[ "def", "get_entity_uuid_coords", "(", "self", ",", "uuid", ")", ":", "if", "uuid", "in", "self", ".", "_entity_to_region_map", ":", "coords", "=", "self", ".", "_entity_to_region_map", "[", "uuid", "]", "entities", "=", "self", ".", "get_entities", "(", "*", "coords", ")", "for", "entity", "in", "entities", ":", "if", "'uniqueId'", "in", "entity", ".", "data", "and", "entity", ".", "data", "[", "'uniqueId'", "]", "==", "uuid", ":", "return", "tuple", "(", "entity", ".", "data", "[", "'tilePosition'", "]", ")", "return", "None" ]
Returns the coordinates of the given entity UUID inside this world, or `None` if the UUID is not found.
[ "Returns", "the", "coordinates", "of", "the", "given", "entity", "UUID", "inside", "this", "world", "or", "None", "if", "the", "UUID", "is", "not", "found", "." ]
68a2f6bfef73d8803191f937c69005a64eeae017
https://github.com/blixt/py-starbound/blob/68a2f6bfef73d8803191f937c69005a64eeae017/starbound/__init__.py#L110-L121
train
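A sketch of the UUID lookup, with the same hedged World construction; the path and UUID are placeholders.

import starbound

with open('universe/example.world', 'rb') as fh:  # placeholder path
    world = starbound.World(fh)
    # Placeholder UUID; returns the (x, y) tile position, or None when unknown.
    print(world.get_entity_uuid_coords('0123456789abcdef0123456789abcdef'))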
blixt/py-starbound
starbound/__init__.py
World._entity_to_region_map
def _entity_to_region_map(self): """ A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world. """ entity_to_region = {} for key in self.get_all_keys(): layer, rx, ry = struct.unpack('>BHH', key) if layer != 4: continue stream = io.BytesIO(self.get(layer, rx, ry)) num_entities = sbon.read_varint(stream) for _ in range(num_entities): uuid = sbon.read_string(stream) if uuid in entity_to_region: raise ValueError('Duplicate UUID {}'.format(uuid)) entity_to_region[uuid] = (rx, ry) return entity_to_region
python
def _entity_to_region_map(self): """ A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world. """ entity_to_region = {} for key in self.get_all_keys(): layer, rx, ry = struct.unpack('>BHH', key) if layer != 4: continue stream = io.BytesIO(self.get(layer, rx, ry)) num_entities = sbon.read_varint(stream) for _ in range(num_entities): uuid = sbon.read_string(stream) if uuid in entity_to_region: raise ValueError('Duplicate UUID {}'.format(uuid)) entity_to_region[uuid] = (rx, ry) return entity_to_region
[ "def", "_entity_to_region_map", "(", "self", ")", ":", "entity_to_region", "=", "{", "}", "for", "key", "in", "self", ".", "get_all_keys", "(", ")", ":", "layer", ",", "rx", ",", "ry", "=", "struct", ".", "unpack", "(", "'>BHH'", ",", "key", ")", "if", "layer", "!=", "4", ":", "continue", "stream", "=", "io", ".", "BytesIO", "(", "self", ".", "get", "(", "layer", ",", "rx", ",", "ry", ")", ")", "num_entities", "=", "sbon", ".", "read_varint", "(", "stream", ")", "for", "_", "in", "range", "(", "num_entities", ")", ":", "uuid", "=", "sbon", ".", "read_string", "(", "stream", ")", "if", "uuid", "in", "entity_to_region", ":", "raise", "ValueError", "(", "'Duplicate UUID {}'", ".", "format", "(", "uuid", ")", ")", "entity_to_region", "[", "uuid", "]", "=", "(", "rx", ",", "ry", ")", "return", "entity_to_region" ]
A dict whose keys are the UUIDs (or just IDs, in some cases) of entities, and whose values are the `(rx, ry)` coordinates in which that entity can be found. This can be used to easily locate particular entities inside the world.
[ "A", "dict", "whose", "keys", "are", "the", "UUIDs", "(", "or", "just", "IDs", "in", "some", "cases", ")", "of", "entities", "and", "whose", "values", "are", "the", "(", "rx", "ry", ")", "coordinates", "in", "which", "that", "entity", "can", "be", "found", ".", "This", "can", "be", "used", "to", "easily", "locate", "particular", "entities", "inside", "the", "world", "." ]
68a2f6bfef73d8803191f937c69005a64eeae017
https://github.com/blixt/py-starbound/blob/68a2f6bfef73d8803191f937c69005a64eeae017/starbound/__init__.py#L149-L168
train
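For reference, a standalone sketch of the '>BHH' key format the property decodes: a one-byte layer (layer 4 holds the per-region entity index) followed by big-endian region coordinates.

import struct

key = struct.pack('>BHH', 4, 12, 34)
layer, rx, ry = struct.unpack('>BHH', key)
print(layer, rx, ry)  # 4 12 34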
xolox/python-qpass
qpass/__init__.py
create_fuzzy_pattern
def create_fuzzy_pattern(pattern): """ Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression. """ return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)
python
def create_fuzzy_pattern(pattern): """ Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression. """ return re.compile(".*".join(map(re.escape, pattern)), re.IGNORECASE)
[ "def", "create_fuzzy_pattern", "(", "pattern", ")", ":", "return", "re", ".", "compile", "(", "\".*\"", ".", "join", "(", "map", "(", "re", ".", "escape", ",", "pattern", ")", ")", ",", "re", ".", "IGNORECASE", ")" ]
Convert a string into a fuzzy regular expression pattern. :param pattern: The input pattern (a string). :returns: A compiled regular expression object. This function works by adding ``.*`` between each of the characters in the input pattern and compiling the resulting expression into a case insensitive regular expression.
[ "Convert", "a", "string", "into", "a", "fuzzy", "regular", "expression", "pattern", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L431-L442
train
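A quick sketch of the resulting pattern; the function is defined in qpass/__init__.py per the record above, so it imports from the package root.

from qpass import create_fuzzy_pattern

pattern = create_fuzzy_pattern('gh')   # compiles to something like /g.*h/i
print(bool(pattern.search('GitHub')))  # True: 'g' and 'h' appear in order
print(bool(pattern.search('hug')))     # False: no 'h' after the 'g'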
xolox/python-qpass
qpass/__init__.py
AbstractPasswordStore.filtered_entries
def filtered_entries(self): """A list of :class:`PasswordEntry` objects that don't match the exclude list.""" return [ e for e in self.entries if not any(fnmatch.fnmatch(e.name.lower(), p.lower()) for p in self.exclude_list) ]
python
def filtered_entries(self): """A list of :class:`PasswordEntry` objects that don't match the exclude list.""" return [ e for e in self.entries if not any(fnmatch.fnmatch(e.name.lower(), p.lower()) for p in self.exclude_list) ]
[ "def", "filtered_entries", "(", "self", ")", ":", "return", "[", "e", "for", "e", "in", "self", ".", "entries", "if", "not", "any", "(", "fnmatch", ".", "fnmatch", "(", "e", ".", "name", ".", "lower", "(", ")", ",", "p", ".", "lower", "(", ")", ")", "for", "p", "in", "self", ".", "exclude_list", ")", "]" ]
A list of :class:`PasswordEntry` objects that don't match the exclude list.
[ "A", "list", "of", ":", "class", ":", "PasswordEntry", "objects", "that", "don", "t", "match", "the", "exclude", "list", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L104-L108
train
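A hedged usage sketch: PasswordStore as a concrete subclass of the abstract store, its directory keyword, and setting exclude_list as a plain attribute are all assumptions based on typical usage of this package; the directory and pattern are placeholders.

from qpass import PasswordStore  # assumed concrete store export

store = PasswordStore(directory='~/.password-store')  # hypothetical location
store.exclude_list = ['*test*']  # case-insensitive glob patterns, per the property above
for entry in store.filtered_entries:
    print(entry.name)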
xolox/python-qpass
qpass/__init__.py
AbstractPasswordStore.fuzzy_search
def fuzzy_search(self, *filters):
        """
        Perform a "fuzzy" search that matches the given characters in the given order.

        :param filters: The pattern(s) to search for.
        :returns: The matched passwords (a list of :class:`PasswordEntry` objects).
        """
        matches = []
        logger.verbose(
            "Performing fuzzy search on %s (%s) ..", pluralize(len(filters), "pattern"), concatenate(map(repr, filters))
        )
        patterns = list(map(create_fuzzy_pattern, filters))
        for entry in self.filtered_entries:
            if all(p.search(entry.name) for p in patterns):
                matches.append(entry)
        logger.log(
            logging.INFO if matches else logging.VERBOSE,
            "Matched %s using fuzzy search.",
            pluralize(len(matches), "password"),
        )
        return matches
python
def fuzzy_search(self, *filters):
        """
        Perform a "fuzzy" search that matches the given characters in the given order.

        :param filters: The pattern(s) to search for.
        :returns: The matched passwords (a list of :class:`PasswordEntry` objects).
        """
        matches = []
        logger.verbose(
            "Performing fuzzy search on %s (%s) ..", pluralize(len(filters), "pattern"), concatenate(map(repr, filters))
        )
        patterns = list(map(create_fuzzy_pattern, filters))
        for entry in self.filtered_entries:
            if all(p.search(entry.name) for p in patterns):
                matches.append(entry)
        logger.log(
            logging.INFO if matches else logging.VERBOSE,
            "Matched %s using fuzzy search.",
            pluralize(len(matches), "password"),
        )
        return matches
[ "def", "fuzzy_search", "(", "self", ",", "*", "filters", ")", ":", "matches", "=", "[", "]", "logger", ".", "verbose", "(", "\"Performing fuzzy search on %s (%s) ..\"", ",", "pluralize", "(", "len", "(", "filters", ")", ",", "\"pattern\"", ")", ",", "concatenate", "(", "map", "(", "repr", ",", "filters", ")", ")", ")", "patterns", "=", "list", "(", "map", "(", "create_fuzzy_pattern", ",", "filters", ")", ")", "for", "entry", "in", "self", ".", "filtered_entries", ":", "if", "all", "(", "p", ".", "search", "(", "entry", ".", "name", ")", "for", "p", "in", "patterns", ")", ":", "matches", ".", "append", "(", "entry", ")", "logger", ".", "log", "(", "logging", ".", "INFO", "if", "matches", "else", "logging", ".", "VERBOSE", ",", "\"Matched %s using fuzzy search.\"", ",", "pluralize", "(", "len", "(", "matches", ")", ",", "\"password\"", ")", ",", ")", "return", "matches" ]
Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings).
[ "Perform", "a", "fuzzy", "search", "that", "matches", "the", "given", "characters", "in", "the", "given", "order", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L110-L130
train
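A sketch of a fuzzy query, with the same assumed PasswordStore as above:

from qpass import PasswordStore  # assumed concrete store export

store = PasswordStore(directory='~/.password-store')  # placeholder
# Each positional argument becomes an in-order character pattern (see
# create_fuzzy_pattern above), and all patterns must match.
for entry in store.fuzzy_search('gh'):
    print(entry.name)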
xolox/python-qpass
qpass/__init__.py
AbstractPasswordStore.select_entry
def select_entry(self, *arguments):
        """
        Select a password from the available choices.

        :param arguments: Refer to :func:`smart_search()`.
        :returns: The matched password (a :class:`PasswordEntry` object); when no
                  password matches, the exceptions raised by :func:`smart_search()`
                  propagate.
        """
        matches = self.smart_search(*arguments)
        if len(matches) > 1:
            logger.info("More than one match, prompting for choice ..")
            labels = [entry.name for entry in matches]
            return matches[labels.index(prompt_for_choice(labels))]
        else:
            logger.info("Matched one entry: %s", matches[0].name)
            return matches[0]
python
def select_entry(self, *arguments):
        """
        Select a password from the available choices.

        :param arguments: Refer to :func:`smart_search()`.
        :returns: The matched password (a :class:`PasswordEntry` object); when no
                  password matches, the exceptions raised by :func:`smart_search()`
                  propagate.
        """
        matches = self.smart_search(*arguments)
        if len(matches) > 1:
            logger.info("More than one match, prompting for choice ..")
            labels = [entry.name for entry in matches]
            return matches[labels.index(prompt_for_choice(labels))]
        else:
            logger.info("Matched one entry: %s", matches[0].name)
            return matches[0]
[ "def", "select_entry", "(", "self", ",", "*", "arguments", ")", ":", "matches", "=", "self", ".", "smart_search", "(", "*", "arguments", ")", "if", "len", "(", "matches", ")", ">", "1", ":", "logger", ".", "info", "(", "\"More than one match, prompting for choice ..\"", ")", "labels", "=", "[", "entry", ".", "name", "for", "entry", "in", "matches", "]", "return", "matches", "[", "labels", ".", "index", "(", "prompt_for_choice", "(", "labels", ")", ")", "]", "else", ":", "logger", ".", "info", "(", "\"Matched one entry: %s\"", ",", "matches", "[", "0", "]", ".", "name", ")", "return", "matches", "[", "0", "]" ]
Select a password from the available choices.

        :param arguments: Refer to :func:`smart_search()`.
        :returns: The matched password (a :class:`PasswordEntry` object); when no
                  password matches, the exceptions raised by :func:`smart_search()`
                  propagate.
[ "Select", "a", "password", "from", "the", "available", "choices", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L132-L147
train
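A sketch of interactive selection, again with the assumed concrete store:

from qpass import PasswordStore  # assumed concrete store export

store = PasswordStore(directory='~/.password-store')  # placeholder
entry = store.select_entry('github')  # prompts interactively when several names match
print(entry.name)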
xolox/python-qpass
qpass/__init__.py
AbstractPasswordStore.simple_search
def simple_search(self, *keywords):
        """
        Perform a simple search for case insensitive substring matches.

        :param keywords: The string(s) to search for.
        :returns: The matched passwords (a list of :class:`PasswordEntry` objects).

        Only passwords whose names match *all* of the given keywords are returned.
        """
        matches = []
        keywords = [kw.lower() for kw in keywords]
        logger.verbose(
            "Performing simple search on %s (%s) ..",
            pluralize(len(keywords), "keyword"),
            concatenate(map(repr, keywords)),
        )
        for entry in self.filtered_entries:
            normalized = entry.name.lower()
            if all(kw in normalized for kw in keywords):
                matches.append(entry)
        logger.log(
            logging.INFO if matches else logging.VERBOSE,
            "Matched %s using simple search.",
            pluralize(len(matches), "password"),
        )
        return matches
python
def simple_search(self, *keywords):
        """
        Perform a simple search for case insensitive substring matches.

        :param keywords: The string(s) to search for.
        :returns: The matched passwords (a list of :class:`PasswordEntry` objects).

        Only passwords whose names match *all* of the given keywords are returned.
        """
        matches = []
        keywords = [kw.lower() for kw in keywords]
        logger.verbose(
            "Performing simple search on %s (%s) ..",
            pluralize(len(keywords), "keyword"),
            concatenate(map(repr, keywords)),
        )
        for entry in self.filtered_entries:
            normalized = entry.name.lower()
            if all(kw in normalized for kw in keywords):
                matches.append(entry)
        logger.log(
            logging.INFO if matches else logging.VERBOSE,
            "Matched %s using simple search.",
            pluralize(len(matches), "password"),
        )
        return matches
[ "def", "simple_search", "(", "self", ",", "*", "keywords", ")", ":", "matches", "=", "[", "]", "keywords", "=", "[", "kw", ".", "lower", "(", ")", "for", "kw", "in", "keywords", "]", "logger", ".", "verbose", "(", "\"Performing simple search on %s (%s) ..\"", ",", "pluralize", "(", "len", "(", "keywords", ")", ",", "\"keyword\"", ")", ",", "concatenate", "(", "map", "(", "repr", ",", "keywords", ")", ")", ",", ")", "for", "entry", "in", "self", ".", "filtered_entries", ":", "normalized", "=", "entry", ".", "name", ".", "lower", "(", ")", "if", "all", "(", "kw", "in", "normalized", "for", "kw", "in", "keywords", ")", ":", "matches", ".", "append", "(", "entry", ")", "logger", ".", "log", "(", "logging", ".", "INFO", "if", "matches", "else", "logging", ".", "VERBOSE", ",", "\"Matched %s using simple search.\"", ",", "pluralize", "(", "len", "(", "matches", ")", ",", "\"password\"", ")", ",", ")", "return", "matches" ]
Perform a simple search for case insensitive substring matches.

        :param keywords: The string(s) to search for.
        :returns: The matched passwords (a list of :class:`PasswordEntry` objects).

        Only passwords whose names match *all* of the given keywords are returned.
[ "Perform", "a", "simple", "search", "for", "case", "insensitive", "substring", "matches", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L149-L175
train
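A minimal usage sketch for simple_search(), under the same illustrative assumptions as above (installed qpass, existing store):

from qpass import PasswordStore

store = PasswordStore(directory="~/.password-store")
# Case insensitive substring matching; an entry's name must contain *all* keywords.
for entry in store.simple_search("bank", "login"):
    print(entry.name)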
xolox/python-qpass
qpass/__init__.py
AbstractPasswordStore.smart_search
def smart_search(self, *arguments): """ Perform a smart search on the given keywords or patterns. :param arguments: The keywords or patterns to search for. :returns: The matching entries (a list of :class:`PasswordEntry` objects). :raises: The following exceptions can be raised: - :exc:`.NoMatchingPasswordError` when no matching passwords are found. - :exc:`.EmptyPasswordStoreError` when the password store is empty. This method first tries :func:`simple_search()` and if that doesn't produce any matches it will fall back to :func:`fuzzy_search()`. If no matches are found an exception is raised (see above). """ matches = self.simple_search(*arguments) if not matches: logger.verbose("Falling back from substring search to fuzzy search ..") matches = self.fuzzy_search(*arguments) if not matches: if len(self.filtered_entries) > 0: raise NoMatchingPasswordError( format("No passwords matched the given arguments! (%s)", concatenate(map(repr, arguments))) ) else: msg = "You don't have any passwords yet! (no *.gpg files found)" raise EmptyPasswordStoreError(msg) return matches
python
def smart_search(self, *arguments): """ Perform a smart search on the given keywords or patterns. :param arguments: The keywords or patterns to search for. :returns: The matching entries (a list of :class:`PasswordEntry` objects). :raises: The following exceptions can be raised: - :exc:`.NoMatchingPasswordError` when no matching passwords are found. - :exc:`.EmptyPasswordStoreError` when the password store is empty. This method first tries :func:`simple_search()` and if that doesn't produce any matches it will fall back to :func:`fuzzy_search()`. If no matches are found an exception is raised (see above). """ matches = self.simple_search(*arguments) if not matches: logger.verbose("Falling back from substring search to fuzzy search ..") matches = self.fuzzy_search(*arguments) if not matches: if len(self.filtered_entries) > 0: raise NoMatchingPasswordError( format("No passwords matched the given arguments! (%s)", concatenate(map(repr, arguments))) ) else: msg = "You don't have any passwords yet! (no *.gpg files found)" raise EmptyPasswordStoreError(msg) return matches
[ "def", "smart_search", "(", "self", ",", "*", "arguments", ")", ":", "matches", "=", "self", ".", "simple_search", "(", "*", "arguments", ")", "if", "not", "matches", ":", "logger", ".", "verbose", "(", "\"Falling back from substring search to fuzzy search ..\"", ")", "matches", "=", "self", ".", "fuzzy_search", "(", "*", "arguments", ")", "if", "not", "matches", ":", "if", "len", "(", "self", ".", "filtered_entries", ")", ">", "0", ":", "raise", "NoMatchingPasswordError", "(", "format", "(", "\"No passwords matched the given arguments! (%s)\"", ",", "concatenate", "(", "map", "(", "repr", ",", "arguments", ")", ")", ")", ")", "else", ":", "msg", "=", "\"You don't have any passwords yet! (no *.gpg files found)\"", "raise", "EmptyPasswordStoreError", "(", "msg", ")", "return", "matches" ]
Perform a smart search on the given keywords or patterns. :param arguments: The keywords or patterns to search for. :returns: The matching entries (a list of :class:`PasswordEntry` objects). :raises: The following exceptions can be raised: - :exc:`.NoMatchingPasswordError` when no matching passwords are found. - :exc:`.EmptyPasswordStoreError` when the password store is empty. This method first tries :func:`simple_search()` and if that doesn't produce any matches it will fall back to :func:`fuzzy_search()`. If no matches are found an exception is raised (see above).
[ "Perform", "a", "smart", "search", "on", "the", "given", "keywords", "or", "patterns", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L177-L204
train
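A hedged usage sketch for smart_search(); the qpass.exceptions import path is an assumption based on the exception names used in the code:

from qpass import PasswordStore
from qpass.exceptions import EmptyPasswordStoreError, NoMatchingPasswordError

store = PasswordStore(directory="~/.password-store")
try:
    # Falls back from substring matching to fuzzy matching automatically.
    matches = store.smart_search("gmail")
except NoMatchingPasswordError:
    print("Nothing matched, even with fuzzy search.")
except EmptyPasswordStoreError:
    print("The password store contains no *.gpg files.")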
xolox/python-qpass
qpass/__init__.py
QuickPass.entries
def entries(self): """A list of :class:`PasswordEntry` objects.""" passwords = [] for store in self.stores: passwords.extend(store.entries) return natsort(passwords, key=lambda e: e.name)
python
def entries(self): """A list of :class:`PasswordEntry` objects.""" passwords = [] for store in self.stores: passwords.extend(store.entries) return natsort(passwords, key=lambda e: e.name)
[ "def", "entries", "(", "self", ")", ":", "passwords", "=", "[", "]", "for", "store", "in", "self", ".", "stores", ":", "passwords", ".", "extend", "(", "store", ".", "entries", ")", "return", "natsort", "(", "passwords", ",", "key", "=", "lambda", "e", ":", "e", ".", "name", ")" ]
A list of :class:`PasswordEntry` objects.
[ "A", "list", "of", ":", "class", ":", "PasswordEntry", "objects", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L219-L224
train
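A sketch of aggregating several stores with QuickPass; passing the stores via a `stores` keyword is an assumption (property_manager style initialization), and the directories are illustrative:

from qpass import PasswordStore, QuickPass

qp = QuickPass(stores=[
    PasswordStore(directory="~/.password-store"),
    PasswordStore(directory="~/work/.password-store"),
])
# Entries from all stores, naturally sorted by name.
print([entry.name for entry in qp.entries])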
xolox/python-qpass
qpass/__init__.py
PasswordStore.context
def context(self): """ An execution context created using :mod:`executor.contexts`. The value of :attr:`context` defaults to a :class:`~executor.contexts.LocalContext` object with the following characteristics: - The working directory of the execution context is set to the value of :attr:`directory`. - The environment variable given by :data:`DIRECTORY_VARIABLE` is set to the value of :attr:`directory`. :raises: :exc:`.MissingPasswordStoreError` when :attr:`directory` doesn't exist. """ # Make sure the directory exists. self.ensure_directory_exists() # Prepare the environment variables. environment = {DIRECTORY_VARIABLE: self.directory} try: # Try to enable the GPG agent in headless sessions. environment.update(get_gpg_variables()) except Exception: # If we failed then let's at least make sure that the # $GPG_TTY environment variable is set correctly. environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True)) return LocalContext(directory=self.directory, environment=environment)
python
def context(self): """ An execution context created using :mod:`executor.contexts`. The value of :attr:`context` defaults to a :class:`~executor.contexts.LocalContext` object with the following characteristics: - The working directory of the execution context is set to the value of :attr:`directory`. - The environment variable given by :data:`DIRECTORY_VARIABLE` is set to the value of :attr:`directory`. :raises: :exc:`.MissingPasswordStoreError` when :attr:`directory` doesn't exist. """ # Make sure the directory exists. self.ensure_directory_exists() # Prepare the environment variables. environment = {DIRECTORY_VARIABLE: self.directory} try: # Try to enable the GPG agent in headless sessions. environment.update(get_gpg_variables()) except Exception: # If we failed then let's at least make sure that the # $GPG_TTY environment variable is set correctly. environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True)) return LocalContext(directory=self.directory, environment=environment)
[ "def", "context", "(", "self", ")", ":", "# Make sure the directory exists.", "self", ".", "ensure_directory_exists", "(", ")", "# Prepare the environment variables.", "environment", "=", "{", "DIRECTORY_VARIABLE", ":", "self", ".", "directory", "}", "try", ":", "# Try to enable the GPG agent in headless sessions.", "environment", ".", "update", "(", "get_gpg_variables", "(", ")", ")", "except", "Exception", ":", "# If we failed then let's at least make sure that the", "# $GPG_TTY environment variable is set correctly.", "environment", ".", "update", "(", "GPG_TTY", "=", "execute", "(", "\"tty\"", ",", "capture", "=", "True", ",", "check", "=", "False", ",", "tty", "=", "True", ",", "silent", "=", "True", ")", ")", "return", "LocalContext", "(", "directory", "=", "self", ".", "directory", ",", "environment", "=", "environment", ")" ]
An execution context created using :mod:`executor.contexts`. The value of :attr:`context` defaults to a :class:`~executor.contexts.LocalContext` object with the following characteristics: - The working directory of the execution context is set to the value of :attr:`directory`. - The environment variable given by :data:`DIRECTORY_VARIABLE` is set to the value of :attr:`directory`. :raises: :exc:`.MissingPasswordStoreError` when :attr:`directory` doesn't exist.
[ "An", "execution", "context", "created", "using", ":", "mod", ":", "executor", ".", "contexts", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L244-L272
train
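A sketch of using the execution context; it assumes DIRECTORY_VARIABLE resolves to $PASSWORD_STORE_DIR and that the `pass` command is available:

from qpass import PasswordStore

store = PasswordStore(directory="~/.password-store")
ctx = store.context  # LocalContext with the store directory and GPG variables set
print(ctx.capture("pass", "ls"))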
xolox/python-qpass
qpass/__init__.py
PasswordStore.directory
def directory(self, value): """Normalize the value of :attr:`directory` when it's set.""" # Normalize the value of `directory'. set_property(self, "directory", parse_path(value)) # Clear the computed values of `context' and `entries'. clear_property(self, "context") clear_property(self, "entries")
python
def directory(self, value): """Normalize the value of :attr:`directory` when it's set.""" # Normalize the value of `directory'. set_property(self, "directory", parse_path(value)) # Clear the computed values of `context' and `entries'. clear_property(self, "context") clear_property(self, "entries")
[ "def", "directory", "(", "self", ",", "value", ")", ":", "# Normalize the value of `directory'.", "set_property", "(", "self", ",", "\"directory\"", ",", "parse_path", "(", "value", ")", ")", "# Clear the computed values of `context' and `entries'.", "clear_property", "(", "self", ",", "\"context\"", ")", "clear_property", "(", "self", ",", "\"entries\"", ")" ]
Normalize the value of :attr:`directory` when it's set.
[ "Normalize", "the", "value", "of", ":", "attr", ":", "directory", "when", "it", "s", "set", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L292-L298
train
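A short sketch of the setter's normalization and cache invalidation behavior (paths illustrative):

from qpass import PasswordStore

store = PasswordStore(directory="/tmp/old-store")
store.directory = "~/new-store"  # normalized by parse_path(), so "~" is expanded
# The cached `context` and `entries` properties are recomputed on next access.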
xolox/python-qpass
qpass/__init__.py
PasswordStore.entries
def entries(self): """A list of :class:`PasswordEntry` objects.""" timer = Timer() passwords = [] logger.info("Scanning %s ..", format_path(self.directory)) listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0") for filename in split(listing, "\0"): basename, extension = os.path.splitext(filename) if extension == ".gpg": # We use os.path.normpath() to remove the leading `./' prefixes # that `find' adds because it searches the working directory. passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self)) logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer) return natsort(passwords, key=lambda e: e.name)
python
def entries(self): """A list of :class:`PasswordEntry` objects.""" timer = Timer() passwords = [] logger.info("Scanning %s ..", format_path(self.directory)) listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0") for filename in split(listing, "\0"): basename, extension = os.path.splitext(filename) if extension == ".gpg": # We use os.path.normpath() to remove the leading `./' prefixes # that `find' adds because it searches the working directory. passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self)) logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer) return natsort(passwords, key=lambda e: e.name)
[ "def", "entries", "(", "self", ")", ":", "timer", "=", "Timer", "(", ")", "passwords", "=", "[", "]", "logger", ".", "info", "(", "\"Scanning %s ..\"", ",", "format_path", "(", "self", ".", "directory", ")", ")", "listing", "=", "self", ".", "context", ".", "capture", "(", "\"find\"", ",", "\"-type\"", ",", "\"f\"", ",", "\"-name\"", ",", "\"*.gpg\"", ",", "\"-print0\"", ")", "for", "filename", "in", "split", "(", "listing", ",", "\"\\0\"", ")", ":", "basename", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "extension", "==", "\".gpg\"", ":", "# We use os.path.normpath() to remove the leading `./' prefixes", "# that `find' adds because it searches the working directory.", "passwords", ".", "append", "(", "PasswordEntry", "(", "name", "=", "os", ".", "path", ".", "normpath", "(", "basename", ")", ",", "store", "=", "self", ")", ")", "logger", ".", "verbose", "(", "\"Found %s in %s.\"", ",", "pluralize", "(", "len", "(", "passwords", ")", ",", "\"password\"", ")", ",", "timer", ")", "return", "natsort", "(", "passwords", ",", "key", "=", "lambda", "e", ":", "e", ".", "name", ")" ]
A list of :class:`PasswordEntry` objects.
[ "A", "list", "of", ":", "class", ":", "PasswordEntry", "objects", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L301-L314
train
xolox/python-qpass
qpass/__init__.py
PasswordStore.ensure_directory_exists
def ensure_directory_exists(self): """ Make sure :attr:`directory` exists. :raises: :exc:`.MissingPasswordStoreError` when the password storage directory doesn't exist. """ if not os.path.isdir(self.directory): msg = "The password storage directory doesn't exist! (%s)" raise MissingPasswordStoreError(msg % self.directory)
python
def ensure_directory_exists(self): """ Make sure :attr:`directory` exists. :raises: :exc:`.MissingPasswordStoreError` when the password storage directory doesn't exist. """ if not os.path.isdir(self.directory): msg = "The password storage directory doesn't exist! (%s)" raise MissingPasswordStoreError(msg % self.directory)
[ "def", "ensure_directory_exists", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "directory", ")", ":", "msg", "=", "\"The password storage directory doesn't exist! (%s)\"", "raise", "MissingPasswordStoreError", "(", "msg", "%", "self", ".", "directory", ")" ]
Make sure :attr:`directory` exists. :raises: :exc:`.MissingPasswordStoreError` when the password storage directory doesn't exist.
[ "Make", "sure", ":", "attr", ":", "directory", "exists", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L316-L325
train
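A sketch of the failure mode; the qpass.exceptions import path is an assumption:

from qpass import PasswordStore
from qpass.exceptions import MissingPasswordStoreError

store = PasswordStore(directory="/nonexistent/path")
try:
    store.ensure_directory_exists()
except MissingPasswordStoreError as error:
    print(error)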
xolox/python-qpass
qpass/__init__.py
PasswordEntry.format_text
def format_text(self, include_password=True, use_colors=None, padding=True, filters=()): """ Format :attr:`text` for viewing on a terminal. :param include_password: :data:`True` to include the password in the formatted text, :data:`False` to exclude the password from the formatted text. :param use_colors: :data:`True` to use ANSI escape sequences, :data:`False` otherwise. When this is :data:`None` :func:`~humanfriendly.terminal.terminal_supports_colors()` will be used to detect whether ANSI escape sequences are supported. :param padding: :data:`True` to add empty lines before and after the entry and indent the entry's text with two spaces, :data:`False` to skip the padding. :param filters: An iterable of regular expression patterns (defaults to an empty tuple). If a line in the entry's text matches one of these patterns it won't be shown on the terminal. :returns: The formatted entry (a string). """ # Determine whether we can use ANSI escape sequences. if use_colors is None: use_colors = terminal_supports_colors() # Extract the password (first line) from the entry. lines = self.text.splitlines() password = lines.pop(0).strip() # Compile the given patterns to case insensitive regular expressions # and use them to ignore lines that match any of the given filters. patterns = [coerce_pattern(f, re.IGNORECASE) for f in filters] lines = [l for l in lines if not any(p.search(l) for p in patterns)] text = trim_empty_lines("\n".join(lines)) # Include the password in the formatted text? if include_password: text = "Password: %s\n%s" % (password, text) # Add the name to the entry (only when there's something to show). if text and not text.isspace(): title = " / ".join(split(self.name, "/")) if use_colors: title = ansi_wrap(title, bold=True) text = "%s\n\n%s" % (title, text) # Highlight the entry's text using ANSI escape sequences. lines = [] for line in text.splitlines(): # Check for a "Key: Value" line. match = KEY_VALUE_PATTERN.match(line) if match: key = "%s:" % match.group(1).strip() value = match.group(2).strip() if use_colors: # Highlight the key. key = ansi_wrap(key, color=HIGHLIGHT_COLOR) # Underline hyperlinks in the value. tokens = value.split() for i in range(len(tokens)): if "://" in tokens[i]: tokens[i] = ansi_wrap(tokens[i], underline=True) # Replace the line with a highlighted version. line = key + " " + " ".join(tokens) if padding: line = " " + line lines.append(line) text = "\n".join(lines) text = trim_empty_lines(text) if text and padding: text = "\n%s\n" % text return text
python
def format_text(self, include_password=True, use_colors=None, padding=True, filters=()): """ Format :attr:`text` for viewing on a terminal. :param include_password: :data:`True` to include the password in the formatted text, :data:`False` to exclude the password from the formatted text. :param use_colors: :data:`True` to use ANSI escape sequences, :data:`False` otherwise. When this is :data:`None` :func:`~humanfriendly.terminal.terminal_supports_colors()` will be used to detect whether ANSI escape sequences are supported. :param padding: :data:`True` to add empty lines before and after the entry and indent the entry's text with two spaces, :data:`False` to skip the padding. :param filters: An iterable of regular expression patterns (defaults to an empty tuple). If a line in the entry's text matches one of these patterns it won't be shown on the terminal. :returns: The formatted entry (a string). """ # Determine whether we can use ANSI escape sequences. if use_colors is None: use_colors = terminal_supports_colors() # Extract the password (first line) from the entry. lines = self.text.splitlines() password = lines.pop(0).strip() # Compile the given patterns to case insensitive regular expressions # and use them to ignore lines that match any of the given filters. patterns = [coerce_pattern(f, re.IGNORECASE) for f in filters] lines = [l for l in lines if not any(p.search(l) for p in patterns)] text = trim_empty_lines("\n".join(lines)) # Include the password in the formatted text? if include_password: text = "Password: %s\n%s" % (password, text) # Add the name to the entry (only when there's something to show). if text and not text.isspace(): title = " / ".join(split(self.name, "/")) if use_colors: title = ansi_wrap(title, bold=True) text = "%s\n\n%s" % (title, text) # Highlight the entry's text using ANSI escape sequences. lines = [] for line in text.splitlines(): # Check for a "Key: Value" line. match = KEY_VALUE_PATTERN.match(line) if match: key = "%s:" % match.group(1).strip() value = match.group(2).strip() if use_colors: # Highlight the key. key = ansi_wrap(key, color=HIGHLIGHT_COLOR) # Underline hyperlinks in the value. tokens = value.split() for i in range(len(tokens)): if "://" in tokens[i]: tokens[i] = ansi_wrap(tokens[i], underline=True) # Replace the line with a highlighted version. line = key + " " + " ".join(tokens) if padding: line = " " + line lines.append(line) text = "\n".join(lines) text = trim_empty_lines(text) if text and padding: text = "\n%s\n" % text return text
[ "def", "format_text", "(", "self", ",", "include_password", "=", "True", ",", "use_colors", "=", "None", ",", "padding", "=", "True", ",", "filters", "=", "(", ")", ")", ":", "# Determine whether we can use ANSI escape sequences.", "if", "use_colors", "is", "None", ":", "use_colors", "=", "terminal_supports_colors", "(", ")", "# Extract the password (first line) from the entry.", "lines", "=", "self", ".", "text", ".", "splitlines", "(", ")", "password", "=", "lines", ".", "pop", "(", "0", ")", ".", "strip", "(", ")", "# Compile the given patterns to case insensitive regular expressions", "# and use them to ignore lines that match any of the given filters.", "patterns", "=", "[", "coerce_pattern", "(", "f", ",", "re", ".", "IGNORECASE", ")", "for", "f", "in", "filters", "]", "lines", "=", "[", "l", "for", "l", "in", "lines", "if", "not", "any", "(", "p", ".", "search", "(", "l", ")", "for", "p", "in", "patterns", ")", "]", "text", "=", "trim_empty_lines", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ")", "# Include the password in the formatted text?", "if", "include_password", ":", "text", "=", "\"Password: %s\\n%s\"", "%", "(", "password", ",", "text", ")", "# Add the name to the entry (only when there's something to show).", "if", "text", "and", "not", "text", ".", "isspace", "(", ")", ":", "title", "=", "\" / \"", ".", "join", "(", "split", "(", "self", ".", "name", ",", "\"/\"", ")", ")", "if", "use_colors", ":", "title", "=", "ansi_wrap", "(", "title", ",", "bold", "=", "True", ")", "text", "=", "\"%s\\n\\n%s\"", "%", "(", "title", ",", "text", ")", "# Highlight the entry's text using ANSI escape sequences.", "lines", "=", "[", "]", "for", "line", "in", "text", ".", "splitlines", "(", ")", ":", "# Check for a \"Key: Value\" line.", "match", "=", "KEY_VALUE_PATTERN", ".", "match", "(", "line", ")", "if", "match", ":", "key", "=", "\"%s:\"", "%", "match", ".", "group", "(", "1", ")", ".", "strip", "(", ")", "value", "=", "match", ".", "group", "(", "2", ")", ".", "strip", "(", ")", "if", "use_colors", ":", "# Highlight the key.", "key", "=", "ansi_wrap", "(", "key", ",", "color", "=", "HIGHLIGHT_COLOR", ")", "# Underline hyperlinks in the value.", "tokens", "=", "value", ".", "split", "(", ")", "for", "i", "in", "range", "(", "len", "(", "tokens", ")", ")", ":", "if", "\"://\"", "in", "tokens", "[", "i", "]", ":", "tokens", "[", "i", "]", "=", "ansi_wrap", "(", "tokens", "[", "i", "]", ",", "underline", "=", "True", ")", "# Replace the line with a highlighted version.", "line", "=", "key", "+", "\" \"", "+", "\" \"", ".", "join", "(", "tokens", ")", "if", "padding", ":", "line", "=", "\" \"", "+", "line", "lines", ".", "append", "(", "line", ")", "text", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "text", "=", "trim_empty_lines", "(", "text", ")", "if", "text", "and", "padding", ":", "text", "=", "\"\\n%s\\n\"", "%", "text", "return", "text" ]
Format :attr:`text` for viewing on a terminal. :param include_password: :data:`True` to include the password in the formatted text, :data:`False` to exclude the password from the formatted text. :param use_colors: :data:`True` to use ANSI escape sequences, :data:`False` otherwise. When this is :data:`None` :func:`~humanfriendly.terminal.terminal_supports_colors()` will be used to detect whether ANSI escape sequences are supported. :param padding: :data:`True` to add empty lines before and after the entry and indent the entry's text with two spaces, :data:`False` to skip the padding. :param filters: An iterable of regular expression patterns (defaults to an empty tuple). If a line in the entry's text matches one of these patterns it won't be shown on the terminal. :returns: The formatted entry (a string).
[ "Format", ":", "attr", ":", "text", "for", "viewing", "on", "a", "terminal", "." ]
43ce447b0904ff42a54b8f1dd4d2479f950f258f
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L362-L428
train
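A sketch of formatting an entry for the terminal; the search keyword and the filter pattern are illustrative:

from qpass import PasswordStore

store = PasswordStore(directory="~/.password-store")
entry = store.smart_search("github")[0]
# Hide the password line and drop any line starting with "X-".
print(entry.format_text(include_password=False, filters=(r"^X-",)))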
darothen/xbpch
xbpch/util/diaginfo.py
get_diaginfo
def get_diaginfo(diaginfo_file): """ Read an output's diaginfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- diaginfo_file : str Path to diaginfo.dat Returns ------- DataFrame containing the category information. """ widths = [rec.width for rec in diag_recs] col_names = [rec.name for rec in diag_recs] dtypes = [rec.type for rec in diag_recs] usecols = [name for name in col_names if not name.startswith('-')] diag_df = pd.read_fwf(diaginfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) diag_desc = {diag.name: diag.desc for diag in diag_recs if not diag.name.startswith('-')} return diag_df, diag_desc
python
def get_diaginfo(diaginfo_file): """ Read an output's diaginfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- diaginfo_file : str Path to diaginfo.dat Returns ------- DataFrame containing the category information. """ widths = [rec.width for rec in diag_recs] col_names = [rec.name for rec in diag_recs] dtypes = [rec.type for rec in diag_recs] usecols = [name for name in col_names if not name.startswith('-')] diag_df = pd.read_fwf(diaginfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) diag_desc = {diag.name: diag.desc for diag in diag_recs if not diag.name.startswith('-')} return diag_df, diag_desc
[ "def", "get_diaginfo", "(", "diaginfo_file", ")", ":", "widths", "=", "[", "rec", ".", "width", "for", "rec", "in", "diag_recs", "]", "col_names", "=", "[", "rec", ".", "name", "for", "rec", "in", "diag_recs", "]", "dtypes", "=", "[", "rec", ".", "type", "for", "rec", "in", "diag_recs", "]", "usecols", "=", "[", "name", "for", "name", "in", "col_names", "if", "not", "name", ".", "startswith", "(", "'-'", ")", "]", "diag_df", "=", "pd", ".", "read_fwf", "(", "diaginfo_file", ",", "widths", "=", "widths", ",", "names", "=", "col_names", ",", "dtypes", "=", "dtypes", ",", "comment", "=", "\"#\"", ",", "header", "=", "None", ",", "usecols", "=", "usecols", ")", "diag_desc", "=", "{", "diag", ".", "name", ":", "diag", ".", "desc", "for", "diag", "in", "diag_recs", "if", "not", "diag", ".", "name", ".", "startswith", "(", "'-'", ")", "}", "return", "diag_df", ",", "diag_desc" ]
Read an output's diaginfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- diaginfo_file : str Path to diaginfo.dat Returns ------- DataFrame containing the category information.
[ "Read", "an", "output", "s", "diaginfo", ".", "dat", "file", "and", "parse", "into", "a", "DataFrame", "for", "use", "in", "selecting", "and", "parsing", "categories", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/util/diaginfo.py#L39-L66
train
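A minimal sketch, assuming a diaginfo.dat file sits in the current directory (the filename is illustrative):

from xbpch.util.diaginfo import get_diaginfo

diag_df, diag_desc = get_diaginfo("diaginfo.dat")
print(diag_df.head())     # one row per diagnostic category
print(sorted(diag_desc))  # column name -> human readable description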
darothen/xbpch
xbpch/util/diaginfo.py
get_tracerinfo
def get_tracerinfo(tracerinfo_file): """ Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- DataFrame containing the tracer information. """ widths = [rec.width for rec in tracer_recs] col_names = [rec.name for rec in tracer_recs] dtypes = [rec.type for rec in tracer_recs] usecols = [name for name in col_names if not name.startswith('-')] tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) # Check an edge case related to a bug in GEOS-Chem v12.0.3 which # erroneously dropped short/long tracer names in certain tracerinfo.dat outputs. # What we do here is figure out which rows were erroneously processed (they'll # have NaNs in them) and raise a warning if there are any na_free = tracer_df.dropna(subset=['tracer', 'scale']) only_na = tracer_df[~tracer_df.index.isin(na_free.index)] if len(only_na) > 0: warn("At least one row in {} wasn't decoded correctly; we strongly" " recommend you manually check that file to see that all" " tracers are properly recorded." .format(tracerinfo_file)) tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs if not tracer.name.startswith('-')} # Process some of the information about which variables are hydrocarbons # and chemical tracers versus other diagnostics. def _assign_hydrocarbon(row): if row['C'] != 1: row['hydrocarbon'] = True row['molwt'] = C_MOLECULAR_WEIGHT else: row['hydrocarbon'] = False return row tracer_df = ( tracer_df .apply(_assign_hydrocarbon, axis=1) .assign(chemical=lambda x: x['molwt'].astype(bool)) ) return tracer_df, tracer_desc
python
def get_tracerinfo(tracerinfo_file): """ Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- DataFrame containing the tracer information. """ widths = [rec.width for rec in tracer_recs] col_names = [rec.name for rec in tracer_recs] dtypes = [rec.type for rec in tracer_recs] usecols = [name for name in col_names if not name.startswith('-')] tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names, dtypes=dtypes, comment="#", header=None, usecols=usecols) # Check an edge case related to a bug in GEOS-Chem v12.0.3 which # erroneously dropped short/long tracer names in certain tracerinfo.dat outputs. # What we do here is figure out which rows were erroneously processed (they'll # have NaNs in them) and raise a warning if there are any na_free = tracer_df.dropna(subset=['tracer', 'scale']) only_na = tracer_df[~tracer_df.index.isin(na_free.index)] if len(only_na) > 0: warn("At least one row in {} wasn't decoded correctly; we strongly" " recommend you manually check that file to see that all" " tracers are properly recorded." .format(tracerinfo_file)) tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs if not tracer.name.startswith('-')} # Process some of the information about which variables are hydrocarbons # and chemical tracers versus other diagnostics. def _assign_hydrocarbon(row): if row['C'] != 1: row['hydrocarbon'] = True row['molwt'] = C_MOLECULAR_WEIGHT else: row['hydrocarbon'] = False return row tracer_df = ( tracer_df .apply(_assign_hydrocarbon, axis=1) .assign(chemical=lambda x: x['molwt'].astype(bool)) ) return tracer_df, tracer_desc
[ "def", "get_tracerinfo", "(", "tracerinfo_file", ")", ":", "widths", "=", "[", "rec", ".", "width", "for", "rec", "in", "tracer_recs", "]", "col_names", "=", "[", "rec", ".", "name", "for", "rec", "in", "tracer_recs", "]", "dtypes", "=", "[", "rec", ".", "type", "for", "rec", "in", "tracer_recs", "]", "usecols", "=", "[", "name", "for", "name", "in", "col_names", "if", "not", "name", ".", "startswith", "(", "'-'", ")", "]", "tracer_df", "=", "pd", ".", "read_fwf", "(", "tracerinfo_file", ",", "widths", "=", "widths", ",", "names", "=", "col_names", ",", "dtypes", "=", "dtypes", ",", "comment", "=", "\"#\"", ",", "header", "=", "None", ",", "usecols", "=", "usecols", ")", "# Check an edge case related to a bug in GEOS-Chem v12.0.3 which ", "# erroneously dropped short/long tracer names in certain tracerinfo.dat outputs.", "# What we do here is figure out which rows were erroneously processed (they'll ", "# have NaNs in them) and raise a warning if there are any", "na_free", "=", "tracer_df", ".", "dropna", "(", "subset", "=", "[", "'tracer'", ",", "'scale'", "]", ")", "only_na", "=", "tracer_df", "[", "~", "tracer_df", ".", "index", ".", "isin", "(", "na_free", ".", "index", ")", "]", "if", "len", "(", "only_na", ")", ">", "0", ":", "warn", "(", "\"At least one row in {} wasn't decoded correctly; we strongly\"", "\" recommend you manually check that file to see that all\"", "\" tracers are properly recorded.\"", ".", "format", "(", "tracerinfo_file", ")", ")", "tracer_desc", "=", "{", "tracer", ".", "name", ":", "tracer", ".", "desc", "for", "tracer", "in", "tracer_recs", "if", "not", "tracer", ".", "name", ".", "startswith", "(", "'-'", ")", "}", "# Process some of the information about which variables are hydrocarbons", "# and chemical tracers versus other diagnostics.", "def", "_assign_hydrocarbon", "(", "row", ")", ":", "if", "row", "[", "'C'", "]", "!=", "1", ":", "row", "[", "'hydrocarbon'", "]", "=", "True", "row", "[", "'molwt'", "]", "=", "C_MOLECULAR_WEIGHT", "else", ":", "row", "[", "'hydrocarbon'", "]", "=", "False", "return", "row", "tracer_df", "=", "(", "tracer_df", ".", "apply", "(", "_assign_hydrocarbon", ",", "axis", "=", "1", ")", ".", "assign", "(", "chemical", "=", "lambda", "x", ":", "x", "[", "'molwt'", "]", ".", "astype", "(", "bool", ")", ")", ")", "return", "tracer_df", ",", "tracer_desc" ]
Read an output's tracerinfo.dat file and parse into a DataFrame for use in selecting and parsing categories. Parameters ---------- tracerinfo_file : str Path to tracerinfo.dat Returns ------- DataFrame containing the tracer information.
[ "Read", "an", "output", "s", "tracerinfo", ".", "dat", "file", "and", "parse", "into", "a", "DataFrame", "for", "use", "in", "selecting", "and", "parsing", "categories", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/util/diaginfo.py#L69-L125
train
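A minimal sketch under the same assumption (a tracerinfo.dat file on disk):

from xbpch.util.diaginfo import get_tracerinfo

tracer_df, tracer_desc = get_tracerinfo("tracerinfo.dat")
# Hydrocarbons are flagged and assigned the molecular weight of carbon;
# 'chemical' is True for tracers with a nonzero molecular weight.
print(tracer_df[["tracer", "hydrocarbon", "chemical"]].head())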
darothen/xbpch
xbpch/bpch.py
read_from_bpch
def read_from_bpch(filename, file_position, shape, dtype, endian, use_mmap=False): """ Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`. """ offset = file_position + 4 if use_mmap: d = np.memmap(filename, dtype=dtype, mode='r', shape=shape, offset=offset, order='F') else: with FortranFile(filename, 'rb', endian) as ff: ff.seek(file_position) d = np.array(ff.readline('*f')) d = d.reshape(shape, order='F') # As a sanity check, *be sure* that the resulting data block has the # correct shape, and fail early if it doesn't. if (d.shape != shape): raise IOError("Data chunk read from {} does not have the right shape," " (expected {} but got {})" .format(filename, shape, d.shape)) return d
python
def read_from_bpch(filename, file_position, shape, dtype, endian, use_mmap=False): """ Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`. """ offset = file_position + 4 if use_mmap: d = np.memmap(filename, dtype=dtype, mode='r', shape=shape, offset=offset, order='F') else: with FortranFile(filename, 'rb', endian) as ff: ff.seek(file_position) d = np.array(ff.readline('*f')) d = d.reshape(shape, order='F') # As a sanity check, *be sure* that the resulting data block has the # correct shape, and fail early if it doesn't. if (d.shape != shape): raise IOError("Data chunk read from {} does not have the right shape," " (expected {} but got {})" .format(filename, shape, d.shape)) return d
[ "def", "read_from_bpch", "(", "filename", ",", "file_position", ",", "shape", ",", "dtype", ",", "endian", ",", "use_mmap", "=", "False", ")", ":", "offset", "=", "file_position", "+", "4", "if", "use_mmap", ":", "d", "=", "np", ".", "memmap", "(", "filename", ",", "dtype", "=", "dtype", ",", "mode", "=", "'r'", ",", "shape", "=", "shape", ",", "offset", "=", "offset", ",", "order", "=", "'F'", ")", "else", ":", "with", "FortranFile", "(", "filename", ",", "'rb'", ",", "endian", ")", "as", "ff", ":", "ff", ".", "seek", "(", "file_position", ")", "d", "=", "np", ".", "array", "(", "ff", ".", "readline", "(", "'*f'", ")", ")", "d", "=", "d", ".", "reshape", "(", "shape", ",", "order", "=", "'F'", ")", "# As a sanity check, *be sure* that the resulting data block has the", "# correct shape, and fail early if it doesn't.", "if", "(", "d", ".", "shape", "!=", "shape", ")", ":", "raise", "IOError", "(", "\"Data chunk read from {} does not have the right shape,\"", "\" (expected {} but got {})\"", ".", "format", "(", "filename", ",", "shape", ",", "d", ".", "shape", ")", ")", "return", "d" ]
Read a chunk of data from a bpch output file. Parameters ---------- filename : str Path to file on disk containing the data file_position : int Position (bytes) where desired data chunk begins shape : tuple of ints Resultant (n-dimensional) shape of requested data; the chunk will be read sequentially from disk and then re-shaped dtype : dtype Dtype of data; for best results, pass a dtype which includes an endian indicator, e.g. `dtype = np.dtype('>f4')` endian : str Endianness of data; should be consistent with `dtype` use_mmap : bool Memory map the chunk of data to the file on disk, else read immediately Returns ------- Array with shape `shape` and dtype `dtype` containing the requested chunk of data from `filename`.
[ "Read", "a", "chunk", "of", "data", "from", "a", "bpch", "output", "file", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L353-L398
train
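A hedged sketch of reading one data block; the file name, file_position and shape are purely illustrative, since in practice they come from parsing the bpch headers (as BPCHFile._read_var_data does):

import numpy as np
from xbpch.bpch import read_from_bpch

chunk = read_from_bpch("ctm.bpch", file_position=136, shape=(1, 72, 46, 47),
                       dtype=np.dtype(">f4"), endian=">", use_mmap=True)
print(chunk.shape)  # matches the requested shape, or an IOError is raised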
darothen/xbpch
xbpch/bpch.py
BPCHDataBundle._read
def _read(self): """ Helper function to load the data referenced by this bundle. """ if self._dask: d = da.from_delayed( delayed(read_from_bpch, )( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ), self.shape, self.dtype ) else: d = read_from_bpch( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ) return d
python
def _read(self): """ Helper function to load the data referenced by this bundle. """ if self._dask: d = da.from_delayed( delayed(read_from_bpch, )( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ), self.shape, self.dtype ) else: d = read_from_bpch( self.filename, self.file_position, self.shape, self.dtype, self.endian, use_mmap=self._mmap ) return d
[ "def", "_read", "(", "self", ")", ":", "if", "self", ".", "_dask", ":", "d", "=", "da", ".", "from_delayed", "(", "delayed", "(", "read_from_bpch", ",", ")", "(", "self", ".", "filename", ",", "self", ".", "file_position", ",", "self", ".", "shape", ",", "self", ".", "dtype", ",", "self", ".", "endian", ",", "use_mmap", "=", "self", ".", "_mmap", ")", ",", "self", ".", "shape", ",", "self", ".", "dtype", ")", "else", ":", "d", "=", "read_from_bpch", "(", "self", ".", "filename", ",", "self", ".", "file_position", ",", "self", ".", "shape", ",", "self", ".", "dtype", ",", "self", ".", "endian", ",", "use_mmap", "=", "self", ".", "_mmap", ")", "return", "d" ]
Helper function to load the data referenced by this bundle.
[ "Helper", "function", "to", "load", "the", "data", "referenced", "by", "this", "bundle", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L68-L84
train
darothen/xbpch
xbpch/bpch.py
BPCHFile.close
def close(self): """ Close this bpch file. """ if not self.fp.closed: for v in list(self.var_data): del self.var_data[v] self.fp.close()
python
def close(self): """ Close this bpch file. """ if not self.fp.closed: for v in list(self.var_data): del self.var_data[v] self.fp.close()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "fp", ".", "closed", ":", "for", "v", "in", "list", "(", "self", ".", "var_data", ")", ":", "del", "self", ".", "var_data", "[", "v", "]", "self", ".", "fp", ".", "close", "(", ")" ]
Close this bpch file.
[ "Close", "this", "bpch", "file", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L176-L185
train
darothen/xbpch
xbpch/bpch.py
BPCHFile._read_metadata
def _read_metadata(self): """ Read the main metadata packaged within a bpch file, indicating the output filetype and its title. """ filetype = self.fp.readline().strip() filetitle = self.fp.readline().strip() # Decode to UTF string, if possible try: filetype = str(filetype, 'utf-8') filetitle = str(filetitle, 'utf-8') except: # TODO: Handle this edge-case of converting file metadata more elegantly. pass self.__setattr__('filetype', filetype) self.__setattr__('filetitle', filetitle)
python
def _read_metadata(self): """ Read the main metadata packaged within a bpch file, indicating the output filetype and its title. """ filetype = self.fp.readline().strip() filetitle = self.fp.readline().strip() # Decode to UTF string, if possible try: filetype = str(filetype, 'utf-8') filetitle = str(filetitle, 'utf-8') except: # TODO: Handle this edge-case of converting file metadata more elegantly. pass self.__setattr__('filetype', filetype) self.__setattr__('filetitle', filetitle)
[ "def", "_read_metadata", "(", "self", ")", ":", "filetype", "=", "self", ".", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "filetitle", "=", "self", ".", "fp", ".", "readline", "(", ")", ".", "strip", "(", ")", "# Decode to UTF string, if possible", "try", ":", "filetype", "=", "str", "(", "filetype", ",", "'utf-8'", ")", "filetitle", "=", "str", "(", "filetitle", ",", "'utf-8'", ")", "except", ":", "# TODO: Handle this edge-case of converting file metadata more elegantly.", "pass", "self", ".", "__setattr__", "(", "'filetype'", ",", "filetype", ")", "self", ".", "__setattr__", "(", "'filetitle'", ",", "filetitle", ")" ]
Read the main metadata packaged within a bpch file, indicating the output filetype and its title.
[ "Read", "the", "main", "metadata", "packaged", "within", "a", "bpch", "file", "indicating", "the", "output", "filetype", "and", "its", "title", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L203-L220
train
darothen/xbpch
xbpch/bpch.py
BPCHFile._read_header
def _read_header(self): """ Process the header information (data model / grid spec) """ self._header_pos = self.fp.tell() line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line self._attributes.update({ "modelname": str(modelname, 'utf-8').strip(), "halfpolar": halfpolar, "center180": center180, "res": (res0, res1) }) self.__setattr__('modelname', modelname) self.__setattr__('res', (res0, res1)) self.__setattr__('halfpolar', halfpolar) self.__setattr__('center180', center180) # Re-wind the file self.fp.seek(self._header_pos)
python
def _read_header(self): """ Process the header information (data model / grid spec) """ self._header_pos = self.fp.tell() line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line self._attributes.update({ "modelname": str(modelname, 'utf-8').strip(), "halfpolar": halfpolar, "center180": center180, "res": (res0, res1) }) self.__setattr__('modelname', modelname) self.__setattr__('res', (res0, res1)) self.__setattr__('halfpolar', halfpolar) self.__setattr__('center180', center180) # Re-wind the file self.fp.seek(self._header_pos)
[ "def", "_read_header", "(", "self", ")", ":", "self", ".", "_header_pos", "=", "self", ".", "fp", ".", "tell", "(", ")", "line", "=", "self", ".", "fp", ".", "readline", "(", "'20sffii'", ")", "modelname", ",", "res0", ",", "res1", ",", "halfpolar", ",", "center180", "=", "line", "self", ".", "_attributes", ".", "update", "(", "{", "\"modelname\"", ":", "str", "(", "modelname", ",", "'utf-8'", ")", ".", "strip", "(", ")", ",", "\"halfpolar\"", ":", "halfpolar", ",", "\"center180\"", ":", "center180", ",", "\"res\"", ":", "(", "res0", ",", "res1", ")", "}", ")", "self", ".", "__setattr__", "(", "'modelname'", ",", "modelname", ")", "self", ".", "__setattr__", "(", "'res'", ",", "(", "res0", ",", "res1", ")", ")", "self", ".", "__setattr__", "(", "'halfpolar'", ",", "halfpolar", ")", "self", ".", "__setattr__", "(", "'center180'", ",", "center180", ")", "# Re-wind the file", "self", ".", "fp", ".", "seek", "(", "self", ".", "_header_pos", ")" ]
Process the header information (data model / grid spec)
[ "Process", "the", "header", "information", "(", "data", "model", "/", "grid", "spec", ")" ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L222-L241
train
darothen/xbpch
xbpch/bpch.py
BPCHFile._read_var_data
def _read_var_data(self): """ Iterate over the block of this bpch file and return handlers in the form of `BPCHDataBundle`s for access to the data contained therein. """ var_bundles = OrderedDict() var_attrs = OrderedDict() n_vars = 0 while self.fp.tell() < self.fsize: var_attr = OrderedDict() # read first and second header lines line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line line = self.fp.readline('40si40sdd40s7i') category_name, number, unit, tau0, tau1, reserved = line[:6] dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:] var_attr['number'] = number # Decode byte-strings to utf-8 category_name = str(category_name, 'utf-8') var_attr['category'] = category_name.strip() unit = str(unit, 'utf-8') # get additional metadata from tracerinfo / diaginfo try: cat_df = self.diaginfo_df[ self.diaginfo_df.name == category_name.strip() ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(cat_df > 1): # raise ValueError( # "More than one category matching {} found in " # "diaginfo.dat".format( # category_name.strip() # ) # ) # Safe now to select the only row in the DataFrame cat = cat_df.T.squeeze() tracer_num = int(cat.offset) + int(number) diag_df = self.tracerinfo_df[ self.tracerinfo_df.tracer == tracer_num ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(diag_df > 1): # raise ValueError( # "More than one tracer matching {:d} found in " # "tracerinfo.dat".format(tracer_num) # ) # Safe now to select only row in the DataFrame diag = diag_df.T.squeeze() diag_attr = diag.to_dict() if not unit.strip(): # unit may be empty in bpch unit = diag_attr['unit'] # but not in tracerinfo var_attr.update(diag_attr) except: diag = {'name': '', 'scale': 1} var_attr.update(diag) var_attr['unit'] = unit vname = diag['name'] fullname = category_name.strip() + "_" + vname # parse metadata, get data or set a data proxy if dim2 == 1: data_shape = (dim0, dim1) # 2D field else: data_shape = (dim0, dim1, dim2) var_attr['original_shape'] = data_shape # Add proxy time dimension to shape data_shape = tuple([1, ] + list(data_shape)) origin = (dim3, dim4, dim5) var_attr['origin'] = origin timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1) pos = self.fp.tell() # Note that we don't pass a dtype, and assume everything is # single-fp floats with the correct endian, as hard-coded var_bundle = BPCHDataBundle( data_shape, self.endian, self.filename, pos, [timelo, timehi], metadata=var_attr, use_mmap=self.use_mmap, dask_delayed=self.dask_delayed ) self.fp.skipline() # Save the data as a "bundle" for concatenating in the final step if fullname in var_bundles: var_bundles[fullname].append(var_bundle) else: var_bundles[fullname] = [var_bundle, ] var_attrs[fullname] = var_attr n_vars += 1 self.var_data = var_bundles self.var_attrs = var_attrs
python
def _read_var_data(self): """ Iterate over the block of this bpch file and return handlers in the form of `BPCHDataBundle`s for access to the data contained therein. """ var_bundles = OrderedDict() var_attrs = OrderedDict() n_vars = 0 while self.fp.tell() < self.fsize: var_attr = OrderedDict() # read first and second header lines line = self.fp.readline('20sffii') modelname, res0, res1, halfpolar, center180 = line line = self.fp.readline('40si40sdd40s7i') category_name, number, unit, tau0, tau1, reserved = line[:6] dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:] var_attr['number'] = number # Decode byte-strings to utf-8 category_name = str(category_name, 'utf-8') var_attr['category'] = category_name.strip() unit = str(unit, 'utf-8') # get additional metadata from tracerinfo / diaginfo try: cat_df = self.diaginfo_df[ self.diaginfo_df.name == category_name.strip() ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(cat_df > 1): # raise ValueError( # "More than one category matching {} found in " # "diaginfo.dat".format( # category_name.strip() # ) # ) # Safe now to select the only row in the DataFrame cat = cat_df.T.squeeze() tracer_num = int(cat.offset) + int(number) diag_df = self.tracerinfo_df[ self.tracerinfo_df.tracer == tracer_num ] # TODO: Safer logic for handling case where more than one # tracer metadata match was made # if len(diag_df > 1): # raise ValueError( # "More than one tracer matching {:d} found in " # "tracerinfo.dat".format(tracer_num) # ) # Safe now to select only row in the DataFrame diag = diag_df.T.squeeze() diag_attr = diag.to_dict() if not unit.strip(): # unit may be empty in bpch unit = diag_attr['unit'] # but not in tracerinfo var_attr.update(diag_attr) except: diag = {'name': '', 'scale': 1} var_attr.update(diag) var_attr['unit'] = unit vname = diag['name'] fullname = category_name.strip() + "_" + vname # parse metadata, get data or set a data proxy if dim2 == 1: data_shape = (dim0, dim1) # 2D field else: data_shape = (dim0, dim1, dim2) var_attr['original_shape'] = data_shape # Add proxy time dimension to shape data_shape = tuple([1, ] + list(data_shape)) origin = (dim3, dim4, dim5) var_attr['origin'] = origin timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1) pos = self.fp.tell() # Note that we don't pass a dtype, and assume everything is # single-fp floats with the correct endian, as hard-coded var_bundle = BPCHDataBundle( data_shape, self.endian, self.filename, pos, [timelo, timehi], metadata=var_attr, use_mmap=self.use_mmap, dask_delayed=self.dask_delayed ) self.fp.skipline() # Save the data as a "bundle" for concatenating in the final step if fullname in var_bundles: var_bundles[fullname].append(var_bundle) else: var_bundles[fullname] = [var_bundle, ] var_attrs[fullname] = var_attr n_vars += 1 self.var_data = var_bundles self.var_attrs = var_attrs
[ "def", "_read_var_data", "(", "self", ")", ":", "var_bundles", "=", "OrderedDict", "(", ")", "var_attrs", "=", "OrderedDict", "(", ")", "n_vars", "=", "0", "while", "self", ".", "fp", ".", "tell", "(", ")", "<", "self", ".", "fsize", ":", "var_attr", "=", "OrderedDict", "(", ")", "# read first and second header lines", "line", "=", "self", ".", "fp", ".", "readline", "(", "'20sffii'", ")", "modelname", ",", "res0", ",", "res1", ",", "halfpolar", ",", "center180", "=", "line", "line", "=", "self", ".", "fp", ".", "readline", "(", "'40si40sdd40s7i'", ")", "category_name", ",", "number", ",", "unit", ",", "tau0", ",", "tau1", ",", "reserved", "=", "line", "[", ":", "6", "]", "dim0", ",", "dim1", ",", "dim2", ",", "dim3", ",", "dim4", ",", "dim5", ",", "skip", "=", "line", "[", "6", ":", "]", "var_attr", "[", "'number'", "]", "=", "number", "# Decode byte-strings to utf-8", "category_name", "=", "str", "(", "category_name", ",", "'utf-8'", ")", "var_attr", "[", "'category'", "]", "=", "category_name", ".", "strip", "(", ")", "unit", "=", "str", "(", "unit", ",", "'utf-8'", ")", "# get additional metadata from tracerinfo / diaginfo", "try", ":", "cat_df", "=", "self", ".", "diaginfo_df", "[", "self", ".", "diaginfo_df", ".", "name", "==", "category_name", ".", "strip", "(", ")", "]", "# TODO: Safer logic for handling case where more than one", "# tracer metadata match was made", "# if len(cat_df > 1):", "# raise ValueError(", "# \"More than one category matching {} found in \"", "# \"diaginfo.dat\".format(", "# category_name.strip()", "# )", "# )", "# Safe now to select the only row in the DataFrame", "cat", "=", "cat_df", ".", "T", ".", "squeeze", "(", ")", "tracer_num", "=", "int", "(", "cat", ".", "offset", ")", "+", "int", "(", "number", ")", "diag_df", "=", "self", ".", "tracerinfo_df", "[", "self", ".", "tracerinfo_df", ".", "tracer", "==", "tracer_num", "]", "# TODO: Safer logic for handling case where more than one", "# tracer metadata match was made", "# if len(diag_df > 1):", "# raise ValueError(", "# \"More than one tracer matching {:d} found in \"", "# \"tracerinfo.dat\".format(tracer_num)", "# )", "# Safe now to select only row in the DataFrame", "diag", "=", "diag_df", ".", "T", ".", "squeeze", "(", ")", "diag_attr", "=", "diag", ".", "to_dict", "(", ")", "if", "not", "unit", ".", "strip", "(", ")", ":", "# unit may be empty in bpch", "unit", "=", "diag_attr", "[", "'unit'", "]", "# but not in tracerinfo", "var_attr", ".", "update", "(", "diag_attr", ")", "except", ":", "diag", "=", "{", "'name'", ":", "''", ",", "'scale'", ":", "1", "}", "var_attr", ".", "update", "(", "diag", ")", "var_attr", "[", "'unit'", "]", "=", "unit", "vname", "=", "diag", "[", "'name'", "]", "fullname", "=", "category_name", ".", "strip", "(", ")", "+", "\"_\"", "+", "vname", "# parse metadata, get data or set a data proxy", "if", "dim2", "==", "1", ":", "data_shape", "=", "(", "dim0", ",", "dim1", ")", "# 2D field", "else", ":", "data_shape", "=", "(", "dim0", ",", "dim1", ",", "dim2", ")", "var_attr", "[", "'original_shape'", "]", "=", "data_shape", "# Add proxy time dimension to shape", "data_shape", "=", "tuple", "(", "[", "1", ",", "]", "+", "list", "(", "data_shape", ")", ")", "origin", "=", "(", "dim3", ",", "dim4", ",", "dim5", ")", "var_attr", "[", "'origin'", "]", "=", "origin", "timelo", ",", "timehi", "=", "cf", ".", "tau2time", "(", "tau0", ")", ",", "cf", ".", "tau2time", "(", "tau1", ")", "pos", "=", "self", ".", "fp", ".", "tell", "(", ")", "# Note that we don't pass a dtype, and assume 
everything is", "# single-fp floats with the correct endian, as hard-coded", "var_bundle", "=", "BPCHDataBundle", "(", "data_shape", ",", "self", ".", "endian", ",", "self", ".", "filename", ",", "pos", ",", "[", "timelo", ",", "timehi", "]", ",", "metadata", "=", "var_attr", ",", "use_mmap", "=", "self", ".", "use_mmap", ",", "dask_delayed", "=", "self", ".", "dask_delayed", ")", "self", ".", "fp", ".", "skipline", "(", ")", "# Save the data as a \"bundle\" for concatenating in the final step", "if", "fullname", "in", "var_bundles", ":", "var_bundles", "[", "fullname", "]", ".", "append", "(", "var_bundle", ")", "else", ":", "var_bundles", "[", "fullname", "]", "=", "[", "var_bundle", ",", "]", "var_attrs", "[", "fullname", "]", "=", "var_attr", "n_vars", "+=", "1", "self", ".", "var_data", "=", "var_bundles", "self", ".", "var_attrs", "=", "var_attrs" ]
Iterate over the block of this bpch file and return handlers in the form of `BPCHDataBundle`s for access to the data contained therein.
[ "Iterate", "over", "the", "block", "of", "this", "bpch", "file", "and", "return", "handlers", "in", "the", "form", "of", "BPCHDataBundle", "s", "for", "access", "to", "the", "data", "contained", "therein", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/bpch.py#L244-L350
train
darothen/xbpch
xbpch/common.py
broadcast_1d_array
def broadcast_1d_array(arr, ndim, axis=1): """ Broadcast 1-d array `arr` to `ndim` dimensions on the first axis (`axis`=0) or on the last axis (`axis`=1). Useful for 'outer' calculations involving 1-d arrays that are related to different axes on a multidimensional grid. """ ext_arr = arr for i in range(ndim - 1): ext_arr = np.expand_dims(ext_arr, axis=axis) return ext_arr
python
def broadcast_1d_array(arr, ndim, axis=1): """ Broadcast 1-d array `arr` to `ndim` dimensions on the first axis (`axis`=0) or on the last axis (`axis`=1). Useful for 'outer' calculations involving 1-d arrays that are related to different axes on a multidimensional grid. """ ext_arr = arr for i in range(ndim - 1): ext_arr = np.expand_dims(ext_arr, axis=axis) return ext_arr
[ "def", "broadcast_1d_array", "(", "arr", ",", "ndim", ",", "axis", "=", "1", ")", ":", "ext_arr", "=", "arr", "for", "i", "in", "range", "(", "ndim", "-", "1", ")", ":", "ext_arr", "=", "np", ".", "expand_dims", "(", "ext_arr", ",", "axis", "=", "axis", ")", "return", "ext_arr" ]
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis (`axis`=0) or on the last axis (`axis`=1). Useful for 'outer' calculations involving 1-d arrays that are related to different axes on a multidimensional grid.
[ "Broadcast", "1", "-", "d", "array", "arr", "to", "ndim", "dimensions", "on", "the", "first", "axis", "(", "axis", "=", "0", ")", "or", "on", "the", "last", "axis", "(", "axis", "=", "1", ")", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/common.py#L9-L20
train
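A quick self-contained sketch of `broadcast_1d_array` in action, assuming it is importable from xbpch.common (the shapes are illustrative, not from the record):

import numpy as np

arr = np.arange(3)
# axis=1 (the default) appends trailing singleton axes, so the result
# broadcasts against a (3, 4, 5) grid along its first dimension.
ext = broadcast_1d_array(arr, ndim=3)
print(ext.shape)                          # (3, 1, 1)
print((ext * np.ones((3, 4, 5))).shape)   # (3, 4, 5)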
darothen/xbpch
xbpch/common.py
get_timestamp
def get_timestamp(time=True, date=True, fmt=None):
    """ Return the current timestamp in machine local time.

    Parameters:
    -----------
    time, date : Boolean
        Flag to include the time or date components, respectively,
        in the output.
    fmt : str, optional
        If passed, will override the time/date choice and use as
        the format string passed to `strftime`.

    """
    time_format = "%H:%M:%S"
    date_format = "%m-%d-%Y"

    if fmt is None:
        if time and date:
            fmt = time_format + " " + date_format
        elif time:
            fmt = time_format
        elif date:
            fmt = date_format
        else:
            raise ValueError("One of `date` or `time` must be True!")

    return datetime.now().strftime(fmt)
python
def get_timestamp(time=True, date=True, fmt=None):
    """ Return the current timestamp in machine local time.

    Parameters:
    -----------
    time, date : Boolean
        Flag to include the time or date components, respectively,
        in the output.
    fmt : str, optional
        If passed, will override the time/date choice and use as
        the format string passed to `strftime`.

    """
    time_format = "%H:%M:%S"
    date_format = "%m-%d-%Y"

    if fmt is None:
        if time and date:
            fmt = time_format + " " + date_format
        elif time:
            fmt = time_format
        elif date:
            fmt = date_format
        else:
            raise ValueError("One of `date` or `time` must be True!")

    return datetime.now().strftime(fmt)
[ "def", "get_timestamp", "(", "time", "=", "True", ",", "date", "=", "True", ",", "fmt", "=", "None", ")", ":", "time_format", "=", "\"%H:%M:%S\"", "date_format", "=", "\"%m-%d-%Y\"", "if", "fmt", "is", "None", ":", "if", "time", "and", "date", ":", "fmt", "=", "time_format", "+", "\" \"", "+", "date_format", "elif", "time", ":", "fmt", "=", "time_format", "elif", "date", ":", "fmt", "=", "date_format", "else", ":", "raise", "ValueError", "(", "\"One of `date` or `time` must be True!\"", ")", "return", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "fmt", ")" ]
Return the current timestamp in machine local time.

    Parameters:
    -----------
    time, date : Boolean
        Flag to include the time or date components, respectively,
        in the output.
    fmt : str, optional
        If passed, will override the time/date choice and use as
        the format string passed to `strftime`.
[ "Return", "the", "current", "timestamp", "in", "machine", "local", "time", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/common.py#L23-L49
train
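Illustrative calls to `get_timestamp` (outputs depend on the local clock; the module is assumed to have `from datetime import datetime` in scope, as the function body implies):

print(get_timestamp())               # e.g. '14:05:09 06-01-2024'
print(get_timestamp(date=False))     # time only, e.g. '14:05:09'
print(get_timestamp(fmt='%Y-%m-%d')) # an explicit fmt always wins
# get_timestamp(time=False, date=False) would raise ValueError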
darothen/xbpch
xbpch/common.py
fix_attr_encoding
def fix_attr_encoding(ds):
    """ This is a temporary hot-fix to handle the way metadata is encoded
    when we read data directly from bpch files. It removes the 'scale_factor'
    and 'units' attributes we encode with the data we ingest, converts the
    'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
    boolean, and removes the 'units' attribute from the "time" dimension
    since that too is implicitly encoded.

    In future versions of this library, when upstream issues in decoding
    data wrapped in dask arrays is fixed, this won't be necessary and will
    be removed.

    """

    def _maybe_del_attr(da, attr):
        """ Possibly delete an attribute on a DataArray if it's present """
        if attr in da.attrs:
            del da.attrs[attr]
        return da

    def _maybe_decode_attr(da, attr):
        # TODO: Fix this so that bools get written as attributes just fine
        """ Possibly coerce an attribute on a DataArray to an easier type
        to write to disk. """
        # bool -> int
        if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
            da.attrs[attr] = int(da.attrs[attr])
        return da

    for v in ds.data_vars:
        da = ds[v]
        da = _maybe_del_attr(da, 'scale_factor')
        da = _maybe_del_attr(da, 'units')
        da = _maybe_decode_attr(da, 'hydrocarbon')
        da = _maybe_decode_attr(da, 'chemical')

    # Also delete attributes on time.
    if hasattr(ds, 'time'):
        times = ds.time
        times = _maybe_del_attr(times, 'units')

    return ds
python
def fix_attr_encoding(ds):
    """ This is a temporary hot-fix to handle the way metadata is encoded
    when we read data directly from bpch files. It removes the 'scale_factor'
    and 'units' attributes we encode with the data we ingest, converts the
    'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
    boolean, and removes the 'units' attribute from the "time" dimension
    since that too is implicitly encoded.

    In future versions of this library, when upstream issues in decoding
    data wrapped in dask arrays is fixed, this won't be necessary and will
    be removed.

    """

    def _maybe_del_attr(da, attr):
        """ Possibly delete an attribute on a DataArray if it's present """
        if attr in da.attrs:
            del da.attrs[attr]
        return da

    def _maybe_decode_attr(da, attr):
        # TODO: Fix this so that bools get written as attributes just fine
        """ Possibly coerce an attribute on a DataArray to an easier type
        to write to disk. """
        # bool -> int
        if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
            da.attrs[attr] = int(da.attrs[attr])
        return da

    for v in ds.data_vars:
        da = ds[v]
        da = _maybe_del_attr(da, 'scale_factor')
        da = _maybe_del_attr(da, 'units')
        da = _maybe_decode_attr(da, 'hydrocarbon')
        da = _maybe_decode_attr(da, 'chemical')

    # Also delete attributes on time.
    if hasattr(ds, 'time'):
        times = ds.time
        times = _maybe_del_attr(times, 'units')

    return ds
[ "def", "fix_attr_encoding", "(", "ds", ")", ":", "def", "_maybe_del_attr", "(", "da", ",", "attr", ")", ":", "\"\"\" Possibly delete an attribute on a DataArray if it's present \"\"\"", "if", "attr", "in", "da", ".", "attrs", ":", "del", "da", ".", "attrs", "[", "attr", "]", "return", "da", "def", "_maybe_decode_attr", "(", "da", ",", "attr", ")", ":", "# TODO: Fix this so that bools get written as attributes just fine", "\"\"\" Possibly coerce an attribute on a DataArray to an easier type\n to write to disk. \"\"\"", "# bool -> int", "if", "(", "attr", "in", "da", ".", "attrs", ")", "and", "(", "type", "(", "da", ".", "attrs", "[", "attr", "]", "==", "bool", ")", ")", ":", "da", ".", "attrs", "[", "attr", "]", "=", "int", "(", "da", ".", "attrs", "[", "attr", "]", ")", "return", "da", "for", "v", "in", "ds", ".", "data_vars", ":", "da", "=", "ds", "[", "v", "]", "da", "=", "_maybe_del_attr", "(", "da", ",", "'scale_factor'", ")", "da", "=", "_maybe_del_attr", "(", "da", ",", "'units'", ")", "da", "=", "_maybe_decode_attr", "(", "da", ",", "'hydrocarbon'", ")", "da", "=", "_maybe_decode_attr", "(", "da", ",", "'chemical'", ")", "# Also delete attributes on time.", "if", "hasattr", "(", "ds", ",", "'time'", ")", ":", "times", "=", "ds", ".", "time", "times", "=", "_maybe_del_attr", "(", "times", ",", "'units'", ")", "return", "ds" ]
This is a temporary hot-fix to handle the way metadata is encoded when we read data directly from bpch files. It removes the 'scale_factor' and 'units' attributes we encode with the data we ingest, converts the 'hydrocarbon' and 'chemical' attribute to a binary integer instead of a boolean, and removes the 'units' attribute from the "time" dimension since that too is implicitly encoded.

In future versions of this library, when upstream issues in decoding data wrapped in dask arrays is fixed, this won't be necessary and will be removed.
[ "This", "is", "a", "temporary", "hot", "-", "fix", "to", "handle", "the", "way", "metadata", "is", "encoded", "when", "we", "read", "data", "directly", "from", "bpch", "files", ".", "It", "removes", "the", "scale_factor", "and", "units", "attributes", "we", "encode", "with", "the", "data", "we", "ingest", "converts", "the", "hydrocarbon", "and", "chemical", "attribute", "to", "a", "binary", "integer", "instead", "of", "a", "boolean", "and", "removes", "the", "units", "attribute", "from", "the", "time", "dimension", "since", "that", "too", "is", "implicitly", "encoded", "." ]
31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/common.py#L52-L92
train
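A hedged round-trip sketch for `fix_attr_encoding`: the Dataset, the variable name 'O3', and the attribute values below are invented for illustration; only the attribute names come from the record above.

import numpy as np
import xarray as xr

ds = xr.Dataset({'O3': ('time', np.zeros(2))}, coords={'time': [0, 1]})
ds['O3'].attrs.update(scale_factor=1e9, units='ppbv', hydrocarbon=False)
ds = fix_attr_encoding(ds)
# 'scale_factor' and 'units' are gone; 'hydrocarbon' is now the int 0,
# so the dataset serializes cleanly to netCDF.
print(ds['O3'].attrs)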
asmeurer/iterm2-tools
iterm2_tools/shell_integration.py
after_output
def after_output(command_status):
    """
    Shell sequence to be run after the command output.

    The ``command_status`` should be in the range 0-255.
    """
    if command_status not in range(256):
        raise ValueError("command_status must be an integer in the range 0-255")
    sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status))
    # Flushing is important as the command timing feature may be based on
    # AFTER_OUTPUT in the future.
    sys.stdout.flush()
python
def after_output(command_status):
    """
    Shell sequence to be run after the command output.

    The ``command_status`` should be in the range 0-255.
    """
    if command_status not in range(256):
        raise ValueError("command_status must be an integer in the range 0-255")
    sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status))
    # Flushing is important as the command timing feature may be based on
    # AFTER_OUTPUT in the future.
    sys.stdout.flush()
[ "def", "after_output", "(", "command_status", ")", ":", "if", "command_status", "not", "in", "range", "(", "256", ")", ":", "raise", "ValueError", "(", "\"command_status must be an integer in the range 0-255\"", ")", "sys", ".", "stdout", ".", "write", "(", "AFTER_OUTPUT", ".", "format", "(", "command_status", "=", "command_status", ")", ")", "# Flushing is important as the command timing feature maybe based on", "# AFTER_OUTPUT in the future.", "sys", ".", "stdout", ".", "flush", "(", ")" ]
Shell sequence to be run after the command output. The ``command_status`` should be in the range 0-255.
[ "Shell", "sequence", "to", "be", "run", "after", "the", "command", "output", "." ]
97b1b593bb02884521c2c05ed414f178de0b934e
https://github.com/asmeurer/iterm2-tools/blob/97b1b593bb02884521c2c05ed414f178de0b934e/iterm2_tools/shell_integration.py#L153-L164
train
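A hypothetical way to drive `after_output` from a Python wrapper script; the `subprocess` usage is an assumption, the record only defines the reporting call, and the before/during sequences are assumed to be emitted elsewhere:

import subprocess

result = subprocess.run(['ls', '/tmp'])
# Negative return codes (process killed by a signal) are clamped into
# 0-255, which is the range after_output() requires.
after_output(result.returncode & 0xFF)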
bitlabstudio/django-multilingual-news
multilingual_news/south_migrations/0009_migrate_i18n_fields.py
Migration.forwards
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
    for entry_title in orm.NewsEntryTitle.objects.all():
        entry = NewsEntry.objects.get(pk=entry_title.entry.pk)
        entry.translate(entry_title.language)
        entry.title = entry_title.title
        entry.slug = entry_title.slug
        entry.is_published = entry_title.is_published
        entry.save()
python
def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
    for entry_title in orm.NewsEntryTitle.objects.all():
        entry = NewsEntry.objects.get(pk=entry_title.entry.pk)
        entry.translate(entry_title.language)
        entry.title = entry_title.title
        entry.slug = entry_title.slug
        entry.is_published = entry_title.is_published
        entry.save()
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "# Note: Remember to use orm['appname.ModelName'] rather than \"from appname.models...\"", "for", "entry_title", "in", "orm", ".", "NewsEntryTitle", ".", "objects", ".", "all", "(", ")", ":", "entry", "=", "NewsEntry", ".", "objects", ".", "get", "(", "pk", "=", "entry_title", ".", "entry", ".", "pk", ")", "entry", ".", "translate", "(", "entry_title", ".", "language", ")", "entry", ".", "title", "=", "entry_title", ".", "title", "entry", ".", "slug", "=", "entry_title", ".", "slug", "entry", ".", "is_published", "=", "entry_title", ".", "is_published", "entry", ".", "save", "(", ")" ]
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
2ddc076ce2002a9fa462dbba701441879b49a54d
https://github.com/bitlabstudio/django-multilingual-news/blob/2ddc076ce2002a9fa462dbba701441879b49a54d/multilingual_news/south_migrations/0009_migrate_i18n_fields.py#L11-L20
train
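For context, a South data migration like this one is applied with the standard command, e.g. `python manage.py migrate multilingual_news 0009` (the app label and number here are read off the record's path, not stated in it), at which point `forwards` copies each legacy `NewsEntryTitle` row onto the corresponding translated `NewsEntry`. Note that the direct `NewsEntry` lookup bypasses the frozen `orm`, the very pattern the method's own comment warns against.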