repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
LIVVkit/LIVVkit
livvkit/util/TexHelper.py
translate_bit_for_bit
def translate_bit_for_bit(data):
    """
    Translates data where data["Type"]=="Bit for Bit" into a LaTeX table.

    :param data: dict with optional keys "Title" (section title),
        "Headers" (column names) and "Data" (mapping of variable name to
        a dict of per-header values)
    :return: LaTeX source for the section and its table
    """
    headers = sorted(data.get("Headers", []))
    table = '\\FloatBarrier \n \\section{$NAME} \n'.replace('$NAME', data.get("Title", "table"))
    table += '\\begin{table}[!ht] \n \\begin{center}'

    # Set the number of columns: one per header plus the variable-name column
    n_cols = "c" * (len(headers) + 1)
    table += '\n \\begin{tabular}{$NCOLS} \n'.replace("$NCOLS", n_cols)

    # Put in the headers ('%' must be escaped for LaTeX)
    table += " Variable &"
    for header in headers:
        table += ' $HEADER &'.replace('$HEADER', header).replace('%', '\\%')
    # Drop the trailing '&' before terminating the row
    table = table[:-1] + ' \\\\ \n \\hline \n'

    # Put in the data.  Default to an empty dict (not a list, which has no
    # .items()) so input without a "Data" key yields an empty table instead
    # of raising AttributeError.
    for k, v in data.get("Data", {}).items():
        table += "\n \\textbf{$VAR} & ".replace("$VAR", k)
        for header in headers:
            table += ' $VAL &'.replace("$VAL", str(v[header]))
        table = table[:-1] + ' \\\\'
    table += '\n \\hline \n \\end{tabular} \n \\end{center} \n \\end{table}\n'
    return table
python
def translate_bit_for_bit(data): """ Translates data where data["Type"]=="Bit for Bit" """ headers = sorted(data.get("Headers", [])) table = '\\FloatBarrier \n \\section{$NAME} \n'.replace('$NAME', data.get("Title", "table")) table += '\\begin{table}[!ht] \n \\begin{center}' # Set the number of columns n_cols = "c"*(len(headers)+1) table += '\n \\begin{tabular}{$NCOLS} \n'.replace("$NCOLS", n_cols) # Put in the headers table += " Variable &" for header in headers: table += ' $HEADER &'.replace('$HEADER', header).replace('%', '\%') table = table[:-1] + ' \\\\ \n \hline \n' # Put in the data for k, v in data.get("Data", []).items(): table += "\n \\textbf{$VAR} & ".replace("$VAR", k) for header in headers: table += ' $VAL &'.replace("$VAL", str(v[header])) table = table[:-1] + ' \\\\' table += '\n \hline \n \end{tabular} \n \end{center} \n \end{table}\n' return table
[ "def", "translate_bit_for_bit", "(", "data", ")", ":", "headers", "=", "sorted", "(", "data", ".", "get", "(", "\"Headers\"", ",", "[", "]", ")", ")", "table", "=", "'\\\\FloatBarrier \\n \\\\section{$NAME} \\n'", ".", "replace", "(", "'$NAME'", ",", "data", ...
Translates data where data["Type"]=="Bit for Bit"
[ "Translates", "data", "where", "data", "[", "Type", "]", "==", "Bit", "for", "Bit" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/TexHelper.py#L175-L197
LIVVkit/LIVVkit
livvkit/util/TexHelper.py
translate_file_diff
def translate_file_diff(data):
    """
    Translates data where data["Type"]=="Diff" into LaTeX.

    :param data: dict whose "Data" key maps section titles to
        {option: vals} dicts.  If vals[0] is truthy only vals[-1] is
        printed; otherwise vals[1] is printed followed by vals[-1] in
        italics (presumably old vs. new value -- matches the code, not
        verified against callers).
    :return: LaTeX source describing the configuration differences
    """
    diff = '\\FloatBarrier \\section{Configuration}'
    # Default to an empty dict so input without a "Data" key yields just
    # the section header instead of raising on None.items().
    sections = data.get('Data', {})
    for title, config in sections.items():
        title = title.replace('_', '\\_')  # escape for LaTeX
        diff += ' \n \\subsection{$NAME}'.replace('$NAME', title)
        for opt, vals in config.items():
            opt = opt.replace('_', '\\_')
            diff += '\n\n \\texttt{$NAME} : '.replace('$NAME', opt)
            if vals[0]:
                diff += '$NAME'.replace('$NAME', vals[-1])
            else:
                diff += ('{} \\textit{{{}}}'.format(vals[1], vals[-1]))
    diff += '\n\n'
    return diff
python
def translate_file_diff(data): """ Translates data where data["Type"]=="Diff" """ diff = '\\FloatBarrier \section{Configuration}' sections = data.get('Data') for title, config in sections.items(): title = title.replace('_', '\_') diff += ' \n \\subsection{$NAME}'.replace('$NAME', title) for opt, vals in config.items(): opt = opt.replace('_', '\_') diff += '\n\n \\texttt{$NAME} : '.replace('$NAME', opt) if vals[0]: diff += '$NAME'.replace('$NAME', vals[-1]) else: diff += ('{} \\textit{{{}}}'.format(vals[1], vals[-1])) diff += '\n\n' return diff
[ "def", "translate_file_diff", "(", "data", ")", ":", "diff", "=", "'\\\\FloatBarrier \\section{Configuration}'", "sections", "=", "data", ".", "get", "(", "'Data'", ")", "for", "title", ",", "config", "in", "sections", ".", "items", "(", ")", ":", "title", "...
Translates data where data["Type"]=="Diff"
[ "Translates", "data", "where", "data", "[", "Type", "]", "==", "Diff" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/TexHelper.py#L210-L225
jaraco/hgtools
hgtools/managers/subprocess.py
Subprocess._invoke
def _invoke(self, *params):
    """
    Invoke self.exe as a subprocess and return its decoded stdout.

    :param params: command-line arguments passed to the executable
    :return: the process's stdout decoded as UTF-8
    :raises RuntimeError: if the process exits non-zero; the message is
        the decoded stderr (or stdout when stderr is empty)
    """
    cmd = [self.exe] + list(params)
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=self.location,
        env=self.env,
    )
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Decode so the exception message is readable text rather than a
        # bytes repr like "b'...'".
        message = stderr.strip() or stdout.strip()
        raise RuntimeError(message.decode('utf-8', errors='replace'))
    return stdout.decode('utf-8')
python
def _invoke(self, *params): """ Invoke self.exe as a subprocess """ cmd = [self.exe] + list(params) proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.location, env=self.env) stdout, stderr = proc.communicate() if not proc.returncode == 0: raise RuntimeError(stderr.strip() or stdout.strip()) return stdout.decode('utf-8')
[ "def", "_invoke", "(", "self", ",", "*", "params", ")", ":", "cmd", "=", "[", "self", ".", "exe", "]", "+", "list", "(", "params", ")", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stde...
Invoke self.exe as a subprocess
[ "Invoke", "self", ".", "exe", "as", "a", "subprocess" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/subprocess.py#L15-L26
Rockhopper-Technologies/pluginlib
pluginlib/_util.py
allow_bare_decorator
def allow_bare_decorator(cls):
    """
    Wrapper for a class decorator which allows for bare decorator and
    argument syntax
    """
    @wraps(cls)
    def wrapper(*args, **kwargs):
        """Dispatch between bare-decorator and argument syntax"""
        # A single positional class argument with no keywords means the
        # decorator was applied bare: build an instance with default values
        # and apply it to the class immediately.
        bare = not kwargs and len(args) == 1 and isclass(args[0])
        if bare:
            return cls()(args[0])
        # Otherwise forward the arguments to construct a configured instance.
        return cls(*args, **kwargs)
    return wrapper
python
def allow_bare_decorator(cls): """ Wrapper for a class decorator which allows for bare decorator and argument syntax """ @wraps(cls) def wrapper(*args, **kwargs): """"Wrapper for real decorator""" # If we weren't only passed a bare class, return class instance if kwargs or len(args) != 1 or not isclass(args[0]): # pylint: disable=no-else-return return cls(*args, **kwargs) # Otherwise, pass call to instance with default values else: return cls()(args[0]) return wrapper
[ "def", "allow_bare_decorator", "(", "cls", ")", ":", "@", "wraps", "(", "cls", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\"Wrapper for real decorator\"\"\"", "# If we weren't only passed a bare class, return class instance", "i...
Wrapper for a class decorator which allows for bare decorator and argument syntax
[ "Wrapper", "for", "a", "class", "decorator", "which", "allows", "for", "bare", "decorator", "and", "argument", "syntax" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_util.py#L104-L120
hbldh/sudokuextract
sudokuextract/imgproc/geometry.py
warp_image_by_corner_points_projection
def warp_image_by_corner_points_projection(corner_points, image):
    """Given corner points of a Sudoku, warps original selection to a
    square image.

    :param corner_points: four points ordered top-left, top-right,
        bottom-left, bottom-right
    :type: corner_points: list
    :param image: source image to warp
    :type image:
    :return: warped image, resized to 500x500
    :rtype:
    """
    corners = np.array(corner_points)
    tl, tr, bl, br = corners

    # Side length of the destination square: the longest of the four edges.
    edge_lengths = [
        np.linalg.norm(tr - tl),   # top
        np.linalg.norm(br - bl),   # bottom
        np.linalg.norm(tl - bl),   # left
        np.linalg.norm(tr - br),   # right
    ]
    side = int(np.ceil(max(edge_lengths)))

    src = np.array([tl, tr, bl, br])
    dst = np.array([[0, 0], [side - 1, 0], [0, side - 1], [side - 1, side - 1]])

    # NOTE(review): estimate(dst, src) fits the destination->source mapping,
    # which is the inverse map skimage's warp() expects.
    transform = ProjectiveTransform()
    transform.estimate(dst, src)
    warped = warp(image, transform, output_shape=(side, side))
    return resize(warped, (500, 500))
python
def warp_image_by_corner_points_projection(corner_points, image): """Given corner points of a Sudoku, warps original selection to a square image. :param corner_points: :type: corner_points: list :param image: :type image: :return: :rtype: """ # Clarify by storing in named variables. top_left, top_right, bottom_left, bottom_right = np.array(corner_points) top_edge = np.linalg.norm(top_right - top_left) bottom_edge = np.linalg.norm(bottom_right - bottom_left) left_edge = np.linalg.norm(top_left - bottom_left) right_edge = np.linalg.norm(top_right - bottom_right) L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge]))) src = np.array([top_left, top_right, bottom_left, bottom_right]) dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]]) tr = ProjectiveTransform() tr.estimate(dst, src) warped_image = warp(image, tr, output_shape=(L, L)) out = resize(warped_image, (500, 500)) return out
[ "def", "warp_image_by_corner_points_projection", "(", "corner_points", ",", "image", ")", ":", "# Clarify by storing in named variables.", "top_left", ",", "top_right", ",", "bottom_left", ",", "bottom_right", "=", "np", ".", "array", "(", "corner_points", ")", "top_edg...
Given corner points of a Sudoku, warps original selection to a square image. :param corner_points: :type: corner_points: list :param image: :type image: :return: :rtype:
[ "Given", "corner", "points", "of", "a", "Sudoku", "warps", "original", "selection", "to", "a", "square", "image", "." ]
train
https://github.com/hbldh/sudokuextract/blob/0dff3b46b9896a8bedfc474c61a089e7901f720c/sudokuextract/imgproc/geometry.py#L28-L56
Adarnof/adarnauth-esi
esi/clients.py
cache_spec
def cache_spec(name, spec):
    """
    Cache the spec dict
    :param name: Version name
    :param spec: Spec dict
    :return: True if cached
    """
    cache_key = build_cache_name(name)
    duration = app_settings.ESI_SPEC_CACHE_DURATION
    return cache.set(cache_key, spec, duration)
python
def cache_spec(name, spec): """ Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached """ return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION)
[ "def", "cache_spec", "(", "name", ",", "spec", ")", ":", "return", "cache", ".", "set", "(", "build_cache_name", "(", "name", ")", ",", "spec", ",", "app_settings", ".", "ESI_SPEC_CACHE_DURATION", ")" ]
Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached
[ "Cache", "the", "spec", "dict", ":", "param", "name", ":", "Version", "name", ":", "param", "spec", ":", "Spec", "dict", ":", "return", ":", "True", "if", "cached" ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L123-L130
Adarnof/adarnauth-esi
esi/clients.py
get_spec
def get_spec(name, http_client=None, config=None):
    """
    :param name: Name of the revision of spec, eg latest or v4
    :param http_client: Requests client used for retrieving specs
    :param config: Spec configuration - see Spec.CONFIG_DEFAULTS
    :return: :class:`bravado_core.spec.Spec`
    """
    http_client = http_client or requests_client.RequestsClient()

    def load_spec():
        """Fetch the spec from the remote URL; only runs on a cache miss."""
        return Loader(http_client).load_spec(build_spec_url(name))

    cache_duration = app_settings.ESI_SPEC_CACHE_DURATION
    spec_dict = cache.get_or_set(build_cache_name(name), load_spec, cache_duration)

    # Caller-supplied options override the defaults.
    spec_config = dict(CONFIG_DEFAULTS)
    spec_config.update(config or {})
    return Spec.from_dict(spec_dict, build_spec_url(name), http_client, spec_config)
python
def get_spec(name, http_client=None, config=None): """ :param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec` """ http_client = http_client or requests_client.RequestsClient() def load_spec(): loader = Loader(http_client) return loader.load_spec(build_spec_url(name)) spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION) config = dict(CONFIG_DEFAULTS, **(config or {})) return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config)
[ "def", "get_spec", "(", "name", ",", "http_client", "=", "None", ",", "config", "=", "None", ")", ":", "http_client", "=", "http_client", "or", "requests_client", ".", "RequestsClient", "(", ")", "def", "load_spec", "(", ")", ":", "loader", "=", "Loader", ...
:param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec`
[ ":", "param", "name", ":", "Name", "of", "the", "revision", "of", "spec", "eg", "latest", "or", "v4", ":", "param", "http_client", ":", "Requests", "client", "used", "for", "retrieving", "specs", ":", "param", "config", ":", "Spec", "configuration", "-", ...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L142-L157
Adarnof/adarnauth-esi
esi/clients.py
build_spec
def build_spec(base_version, http_client=None, **kwargs):
    """
    Generates the Spec used to initialize a SwaggerClient, supporting
    mixed resource versions
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :param base_version: Version to base the spec on. Any resource without
    an explicit version will be this.
    :param kwargs: Explicit resource versions, by name (eg Character='v4')
    :return: :class:`bravado_core.spec.Spec`
    """
    base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG)
    # Overlay each explicitly-versioned resource onto the base spec.
    for resource, resource_version in kwargs.items():
        versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG)
        resource_key = resource.capitalize()
        try:
            base_spec.resources[resource_key] = versioned_spec.resources[resource_key]
        except KeyError:
            raise AttributeError(
                'Resource {0} not found on API revision {1}'.format(resource, resource_version))
    return base_spec
python
def build_spec(base_version, http_client=None, **kwargs): """ Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec` """ base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG) if kwargs: for resource, resource_version in kwargs.items(): versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG) try: spec_resource = versioned_spec.resources[resource.capitalize()] except KeyError: raise AttributeError( 'Resource {0} not found on API revision {1}'.format(resource, resource_version)) base_spec.resources[resource.capitalize()] = spec_resource return base_spec
[ "def", "build_spec", "(", "base_version", ",", "http_client", "=", "None", ",", "*", "*", "kwargs", ")", ":", "base_spec", "=", "get_spec", "(", "base_version", ",", "http_client", "=", "http_client", ",", "config", "=", "SPEC_CONFIG", ")", "if", "kwargs", ...
Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec`
[ "Generates", "the", "Spec", "used", "to", "initialize", "a", "SwaggerClient", "supporting", "mixed", "resource", "versions", ":", "param", "http_client", ":", ":", "class", ":", "bravado", ".", "requests_client", ".", "RequestsClient", ":", "param", "base_version"...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L160-L178
Adarnof/adarnauth-esi
esi/clients.py
read_spec
def read_spec(path, http_client=None):
    """
    Reads in a swagger spec file used to initialize a SwaggerClient.

    :param path: String path to local swagger spec file.
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :return: :class:`bravado.client.SwaggerClient`
    """
    # json.load reads straight from the file object; no need for
    # json.loads(f.read()).
    with open(path, 'r') as f:
        spec_dict = json.load(f)

    return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG)
python
def read_spec(path, http_client=None): """ Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. :param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado_core.spec.Spec` """ with open(path, 'r') as f: spec_dict = json.loads(f.read()) return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG)
[ "def", "read_spec", "(", "path", ",", "http_client", "=", "None", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "spec_dict", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "return", "SwaggerClient", ".", ...
Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. :param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado_core.spec.Spec`
[ "Reads", "in", "a", "swagger", "spec", "file", "used", "to", "initialize", "a", "SwaggerClient", ":", "param", "path", ":", "String", "path", "to", "local", "swagger", "spec", "file", ".", ":", "param", "http_client", ":", ":", "class", ":", "bravado", "...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L181-L191
Adarnof/adarnauth-esi
esi/clients.py
esi_client_factory
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs):
    """
    Generates an ESI client.
    :param token: :class:`esi.Token` used to access authenticated endpoints.
    :param datasource: Name of the ESI datasource to access.
    :param spec_file: Absolute path to a swagger spec file to load.
    :param version: Base ESI API version. Accepted values are 'legacy',
    'latest', 'dev', or 'vX' where X is a number.
    :param kwargs: Explicit resource versions to build, in the form
    Character='v4'. Same values accepted as version.
    :return: :class:`bravado.client.SwaggerClient`

    If a spec_file is specified, specific versioning is not available.
    Meaning the version and resource version kwargs are ignored in favour
    of the versions available in the spec_file.
    """
    client = requests_client.RequestsClient()
    if token or datasource:
        client.authenticator = TokenAuthenticator(token=token, datasource=datasource)

    # A local spec file takes precedence and ignores all version arguments.
    if spec_file:
        return read_spec(spec_file, http_client=client)

    api_version = version or app_settings.ESI_API_VERSION
    spec = build_spec(api_version, http_client=client, **kwargs)
    return SwaggerClient(spec)
python
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs): """ Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file. """ client = requests_client.RequestsClient() if token or datasource: client.authenticator = TokenAuthenticator(token=token, datasource=datasource) api_version = version or app_settings.ESI_API_VERSION if spec_file: return read_spec(spec_file, http_client=client) else: spec = build_spec(api_version, http_client=client, **kwargs) return SwaggerClient(spec)
[ "def", "esi_client_factory", "(", "token", "=", "None", ",", "datasource", "=", "None", ",", "spec_file", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "client", "=", "requests_client", ".", "RequestsClient", "(", ")", "if"...
Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file.
[ "Generates", "an", "ESI", "client", ".", ":", "param", "token", ":", ":", "class", ":", "esi", ".", "Token", "used", "to", "access", "authenticated", "endpoints", ".", ":", "param", "datasource", ":", "Name", "of", "the", "ESI", "datasource", "to", "acce...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L194-L218
Adarnof/adarnauth-esi
esi/clients.py
minimize_spec
def minimize_spec(spec_dict, operations=None, resources=None):
    """
    Trims down a source spec dict to only the operations or resources indicated.

    :param spec_dict: The source spec dict to minimize.
    :type spec_dict: dict
    :param operations: A list of operation IDs to retain.
    :type operations: list of str
    :param resources: A list of resource names to retain.
    :type resources: list of str
    :return: Minimized swagger spec dict
    :rtype: dict
    """
    operations = operations or []
    resources = resources or []

    # keep the ugly overhead for now but only add paths we need
    minimized = {key: value for key, value in spec_dict.items() if key != 'paths'}
    minimized['paths'] = {}

    for path_name, path in spec_dict.get('paths', {}).items():
        for method, data in path.items():
            # 'operationId' and 'tags' are optional in the swagger spec, so
            # look them up defensively instead of assuming they exist.
            keep = (data.get('operationId') in operations
                    or any(tag in resources for tag in data.get('tags', [])))
            if keep:
                minimized['paths'].setdefault(path_name, {})[method] = data

    return minimized
python
def minimize_spec(spec_dict, operations=None, resources=None): """ Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of opertion IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict """ operations = operations or [] resources = resources or [] # keep the ugly overhead for now but only add paths we need minimized = {key: value for key, value in spec_dict.items() if key != 'paths'} minimized['paths'] = {} for path_name, path in spec_dict['paths'].items(): for method, data in path.items(): if data['operationId'] in operations or any(tag in resources for tag in data['tags']): if path_name not in minimized['paths']: minimized['paths'][path_name] = {} minimized['paths'][path_name][method] = data return minimized
[ "def", "minimize_spec", "(", "spec_dict", ",", "operations", "=", "None", ",", "resources", "=", "None", ")", ":", "operations", "=", "operations", "or", "[", "]", "resources", "=", "resources", "or", "[", "]", "# keep the ugly overhead for now but only add paths ...
Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of opertion IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict
[ "Trims", "down", "a", "source", "spec", "dict", "to", "only", "the", "operations", "or", "resources", "indicated", ".", ":", "param", "spec_dict", ":", "The", "source", "spec", "dict", "to", "minimize", ".", ":", "type", "spec_dict", ":", "dict", ":", "p...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L221-L247
Adarnof/adarnauth-esi
esi/clients.py
CachingHttpFuture._build_cache_key
def _build_cache_key(request):
    """
    Generated the key name used to cache responses

    :param request: request used to retrieve API response
    :return: formatted cache name
    """
    # Fingerprint everything that distinguishes one request from another.
    fingerprint = ''.join([
        request.method,
        request.url,
        str(request.params),
        str(request.data),
        str(request.json),
    ])
    return 'esi_%s' % md5(fingerprint.encode('utf-8')).hexdigest()
python
def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash
[ "def", "_build_cache_key", "(", "request", ")", ":", "str_hash", "=", "md5", "(", "(", "request", ".", "method", "+", "request", ".", "url", "+", "str", "(", "request", ".", "params", ")", "+", "str", "(", "request", ".", "data", ")", "+", "str", "...
Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name
[ "Generated", "the", "key", "name", "used", "to", "cache", "responses", ":", "param", "request", ":", "request", "used", "to", "retrieve", "API", "response", ":", "return", ":", "formatted", "cache", "name" ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L32-L41
Adarnof/adarnauth-esi
esi/clients.py
CachingHttpFuture._time_to_expiry
def _time_to_expiry(expires):
    """
    Determines the seconds until a HTTP header "Expires" timestamp

    :param expires: HTTP response "Expires" header
    :return: seconds until "Expires" time, or 0 if unparsable or already past
    """
    try:
        expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z')
        delta = expires_dt - datetime.utcnow()
        # Use total_seconds(), not .seconds: .seconds discards the days
        # component (an Expires more than a day out returned only the
        # sub-day remainder) and wraps to a large positive value for
        # negative deltas.  Clamp so past timestamps yield 0.
        return max(int(delta.total_seconds()), 0)
    except ValueError:
        # Unparsable header: treat as immediately expired.
        return 0
python
def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0
[ "def", "_time_to_expiry", "(", "expires", ")", ":", "try", ":", "expires_dt", "=", "datetime", ".", "strptime", "(", "str", "(", "expires", ")", ",", "'%a, %d %b %Y %H:%M:%S %Z'", ")", "delta", "=", "expires_dt", "-", "datetime", ".", "utcnow", "(", ")", "...
Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time
[ "Determines", "the", "seconds", "until", "a", "HTTP", "header", "Expires", "timestamp", ":", "param", "expires", ":", "HTTP", "response", "Expires", "header", ":", "return", ":", "seconds", "until", "Expires", "time" ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L44-L55
20c/vodka
vodka/util.py
dict_get_path
def dict_get_path(data, path, default=None):
    """
    Returns the value inside nested structure of data located
    at period delimited path

    When traversing a list, as long as that list is containing objects of
    type dict, items in that list will have their "name" and "type" values
    tested against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    for key in path.split("."):
        # isinstance (rather than type ==) also accepts list/dict subclasses.
        if isinstance(data, list):
            # Find the element whose "name" (or, failing that, "type")
            # matches the current path key.
            for item in data:
                if item.get("name", item.get("type")) == key:
                    data = item
                    break
            else:
                # No element matched this key.
                return default
        elif isinstance(data, dict):
            if key not in data:
                return default
            data = data[key]
        else:
            # Scalar reached before the path was exhausted.
            return default
    return data
python
def dict_get_path(data, path, default=None): """ Returns the value inside nested structure of data located at period delimited path When traversing a list, as long as that list is containing objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path. Args: data (dict or list): data to traverse path (str): '.' delimited string Kwargs: default: value to return if path does not exist """ keys = path.split(".") for k in keys: if type(data) == list: found = False for item in data: name = item.get("name", item.get("type")) if name == k: found = True data = item break if not found: return default elif type(data) == dict: if k in data: data = data[k] else: return default else: return default return data
[ "def", "dict_get_path", "(", "data", ",", "path", ",", "default", "=", "None", ")", ":", "keys", "=", "path", ".", "split", "(", "\".\"", ")", "for", "k", "in", "keys", ":", "if", "type", "(", "data", ")", "==", "list", ":", "found", "=", "False"...
Returns the value inside nested structure of data located at period delimited path When traversing a list, as long as that list is containing objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path. Args: data (dict or list): data to traverse path (str): '.' delimited string Kwargs: default: value to return if path does not exist
[ "Returns", "the", "value", "inside", "nested", "structure", "of", "data", "located", "at", "period", "delimited", "path" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/util.py#L2-L38
bioasp/caspo
caspo/core/hypergraph.py
HyperGraph.from_graph
def from_graph(cls, graph, length=0):
    """
    Creates a hypergraph (expanded graph) from a :class:`caspo.core.graph.Graph` object instance

    Parameters
    ----------
    graph : :class:`caspo.core.graph.Graph`
        The base interaction graph to be expanded

    length : int
        Maximum length for hyperedges source sets. If 0, use maximum possible in each case.

    Returns
    -------
    caspo.core.hypergraph.HyperGraph
        Created object instance
    """
    # NOTE(review): Python 2 idioms (xrange, it.imap) -- this module predates py3.
    nodes = []                     # node names, index-aligned with `hyper` entries
    hyper = []                     # node index for each generated hyperedge
    edges = defaultdict(list)      # columns of the hyperedge/source table
    j = 0                          # running hyperedge id
    for i, node in enumerate(graph.nodes_iter()):
        nodes.append(node)
        # All incoming edges of this node, with their attribute dicts.
        preds = graph.in_edges(node, data=True)
        l = len(preds)
        if length > 0:
            l = min(length, l)
        # Every non-empty subset of predecessors up to size l is a candidate
        # hyperedge source set.
        for literals in it.chain.from_iterable(it.combinations(preds, r+1) for r in xrange(l)):
            valid = defaultdict(int)
            for source, _, _ in literals:
                valid[source] += 1
            # Keep only subsets where each source node appears exactly once,
            # i.e. no source contributes two parallel edges to the same subset.
            if all(it.imap(lambda c: c == 1, valid.values())):
                hyper.append(i)
                for source, _, data in literals:
                    edges['hyper_idx'].append(j)
                    edges['name'].append(source)
                    edges['sign'].append(data['sign'])
                j += 1

    nodes = pd.Series(nodes, name='name')
    hyper = pd.Series(hyper, name='node_idx')
    edges = pd.DataFrame(edges)

    return cls(nodes, hyper, edges)
python
def from_graph(cls, graph, length=0): """ Creates a hypergraph (expanded graph) from a :class:`caspo.core.graph.Graph` object instance Parameters ---------- graph : :class:`caspo.core.graph.Graph` The base interaction graph to be expanded length : int Maximum length for hyperedges source sets. If 0, use maximum possible in each case. Returns ------- caspo.core.hypergraph.HyperGraph Created object instance """ nodes = [] hyper = [] edges = defaultdict(list) j = 0 for i, node in enumerate(graph.nodes_iter()): nodes.append(node) preds = graph.in_edges(node, data=True) l = len(preds) if length > 0: l = min(length, l) for literals in it.chain.from_iterable(it.combinations(preds, r+1) for r in xrange(l)): valid = defaultdict(int) for source, _, _ in literals: valid[source] += 1 if all(it.imap(lambda c: c == 1, valid.values())): hyper.append(i) for source, _, data in literals: edges['hyper_idx'].append(j) edges['name'].append(source) edges['sign'].append(data['sign']) j += 1 nodes = pd.Series(nodes, name='name') hyper = pd.Series(hyper, name='node_idx') edges = pd.DataFrame(edges) return cls(nodes, hyper, edges)
[ "def", "from_graph", "(", "cls", ",", "graph", ",", "length", "=", "0", ")", ":", "nodes", "=", "[", "]", "hyper", "=", "[", "]", "edges", "=", "defaultdict", "(", "list", ")", "j", "=", "0", "for", "i", ",", "node", "in", "enumerate", "(", "gr...
Creates a hypergraph (expanded graph) from a :class:`caspo.core.graph.Graph` object instance Parameters ---------- graph : :class:`caspo.core.graph.Graph` The base interaction graph to be expanded length : int Maximum length for hyperedges source sets. If 0, use maximum possible in each case. Returns ------- caspo.core.hypergraph.HyperGraph Created object instance
[ "Creates", "a", "hypergraph", "(", "expanded", "graph", ")", "from", "a", ":", "class", ":", "caspo", ".", "core", ".", "graph", ".", "Graph", "object", "instance" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/hypergraph.py#L113-L161
bioasp/caspo
caspo/core/hypergraph.py
HyperGraph.to_funset
def to_funset(self):
    """
    Converts the hypergraph to a set of `gringo.Fun`_ instances

    Returns
    -------
    set
        Representation of the hypergraph as a set of `gringo.Fun`_ instances


    .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
    """
    fs = set()
    # node(name, index) facts.
    # Series.iteritems() was removed in pandas 2.0; .items() is the
    # identical, long-available replacement.
    for i, n in self.nodes.items():
        fs.add(gringo.Fun('node', [n, i]))

    # hyper(node_idx, hyper_idx, n) facts, where n is the number of edges
    # belonging to hyperedge j.
    for j, i in self.hyper.items():
        fs.add(gringo.Fun('hyper', [i, j, len(self.edges[self.edges.hyper_idx == j])]))

    # edge(hyper_idx, name, sign) facts.
    for j, v, s in self.edges.itertuples(index=False):
        fs.add(gringo.Fun('edge', [j, v, s]))

    return fs
python
def to_funset(self): """ Converts the hypergraph to a set of `gringo.Fun`_ instances Returns ------- set Representation of the hypergraph as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = set() for i, n in self.nodes.iteritems(): fs.add(gringo.Fun('node', [n, i])) for j, i in self.hyper.iteritems(): fs.add(gringo.Fun('hyper', [i, j, len(self.edges[self.edges.hyper_idx == j])])) for j, v, s in self.edges.itertuples(index=False): fs.add(gringo.Fun('edge', [j, v, s])) return fs
[ "def", "to_funset", "(", "self", ")", ":", "fs", "=", "set", "(", ")", "for", "i", ",", "n", "in", "self", ".", "nodes", ".", "iteritems", "(", ")", ":", "fs", ".", "add", "(", "gringo", ".", "Fun", "(", "'node'", ",", "[", "n", ",", "i", "...
Converts the hypergraph to a set of `gringo.Fun`_ instances Returns ------- set Representation of the hypergraph as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
[ "Converts", "the", "hypergraph", "to", "a", "set", "of", "gringo", ".", "Fun", "_", "instances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/hypergraph.py#L163-L185
saghul/evergreen
evergreen/futures/_process.py
_process_worker
def _process_worker(call_queue, result_queue, shutdown):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will written
            to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        try:
            item = call_queue.get(block=True, timeout=0.1)
        except queue.Empty:
            # Nothing to do right now; only exit once shutdown was requested.
            if shutdown.is_set():
                return
            continue
        # BaseException (not just Exception) so even KeyboardInterrupt etc.
        # is reported back to the parent instead of killing the worker.
        try:
            outcome = item()
        except BaseException as exc:
            reply = _ResultItem(item.work_id, exception=exc)
        else:
            reply = _ResultItem(item.work_id, result=outcome)
        result_queue.put(reply)
python
def _process_worker(call_queue, result_queue, shutdown): """Evaluates calls from call_queue and places the results in result_queue. This worker is run in a seperate process. Args: call_queue: A multiprocessing.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A multiprocessing.Queue of _ResultItems that will written to by the worker. shutdown: A multiprocessing.Event that will be set as a signal to the worker that it should exit when call_queue is empty. """ while True: try: call_item = call_queue.get(block=True, timeout=0.1) except queue.Empty: if shutdown.is_set(): return else: try: r = call_item() except BaseException as e: result_queue.put(_ResultItem(call_item.work_id, exception=e)) else: result_queue.put(_ResultItem(call_item.work_id, result=r))
[ "def", "_process_worker", "(", "call_queue", ",", "result_queue", ",", "shutdown", ")", ":", "while", "True", ":", "try", ":", "call_item", "=", "call_queue", ".", "get", "(", "block", "=", "True", ",", "timeout", "=", "0.1", ")", "except", "queue", ".",...
Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A multiprocessing.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A multiprocessing.Queue of _ResultItems that will written to by the worker. shutdown: A multiprocessing.Event that will be set as a signal to the worker that it should exit when call_queue is empty.
[ "Evaluates", "calls", "from", "call_queue", "and", "places", "the", "results", "in", "result_queue", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/futures/_process.py#L142-L167
saghul/evergreen
evergreen/futures/_process.py
_queue_manangement_worker
def _queue_manangement_worker(executor_reference,
                              processes,
                              pending_work_items,
                              work_ids_queue,
                              call_queue,
                              result_queue,
                              shutdown_process_event):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
        shutdown_process_event: A multiprocessing.Event used to signal the
            process workers that they should exit when their work queue is
            empty.
    """
    while True:
        # Top up the workers' call queue from the pending work before waiting
        # for results.
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        try:
            result_item = result_queue.get(block=True, timeout=0.1)
        except queue.Empty:
            # Only dereference the weakref while deciding whether to exit;
            # `del executor` below drops the strong reference again so this
            # thread does not keep the executor alive between iterations.
            executor = executor_reference()
            # No more work items can be added if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns this worker has been collected OR
            #   - The executor that owns this worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown_thread:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_process_event.set()

                    # If .join() is not called on the created processes then
                    # some multiprocessing.Queue methods may deadlock on Mac OSX.
                    for p in processes:
                        p.join()
                    return
            del executor
        else:
            work_item = pending_work_items.pop(result_item.work_id)
            loop = work_item.loop
            # Complete the future on the event loop that owns the work item;
            # call_from_thread marshals the call off this helper thread.
            loop.call_from_thread(_set_work_result, work_item, result_item)
            del result_item, work_item, loop
python
def _queue_manangement_worker(executor_reference, processes, pending_work_items, work_ids_queue, call_queue, result_queue, shutdown_process_event): """Manages the communication between this process and the worker processes. This function is run in a local thread. Args: executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. process: A list of the multiprocessing.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. result_queue: A multiprocessing.Queue of _ResultItems generated by the process workers. shutdown_process_event: A multiprocessing.Event used to signal the process workers that they should exit when their work queue is empty. """ while True: _add_call_item_to_queue(pending_work_items, work_ids_queue, call_queue) try: result_item = result_queue.get(block=True, timeout=0.1) except queue.Empty: executor = executor_reference() # No more work items can be added if: # - The interpreter is shutting down OR # - The executor that owns this worker has been collected OR # - The executor that owns this worker has been shutdown. if _shutdown or executor is None or executor._shutdown_thread: # Since no new work items can be added, it is safe to shutdown # this thread if there are no pending work items. if not pending_work_items: shutdown_process_event.set() # If .join() is not called on the created processes then # some multiprocessing.Queue methods may deadlock on Mac OSX. 
for p in processes: p.join() return del executor else: work_item = pending_work_items.pop(result_item.work_id) loop = work_item.loop loop.call_from_thread(_set_work_result, work_item, result_item) del result_item, work_item, loop
[ "def", "_queue_manangement_worker", "(", "executor_reference", ",", "processes", ",", "pending_work_items", ",", "work_ids_queue", ",", "call_queue", ",", "result_queue", ",", "shutdown_process_event", ")", ":", "while", "True", ":", "_add_call_item_to_queue", "(", "pen...
Manages the communication between this process and the worker processes. This function is run in a local thread. Args: executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. process: A list of the multiprocessing.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. result_queue: A multiprocessing.Queue of _ResultItems generated by the process workers. shutdown_process_event: A multiprocessing.Event used to signal the process workers that they should exit when their work queue is empty.
[ "Manages", "the", "communication", "between", "this", "process", "and", "the", "worker", "processes", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/futures/_process.py#L205-L262
20c/vodka
vodka/bartender.py
options
def options(f):
    """
    Shared options, used by all bartender commands
    """
    # Build the decorator once, then apply it to the wrapped command.
    config_option = click.option(
        '--config',
        envvar='VODKA_HOME',
        default=click.get_app_dir('vodka'),
        help="location of config file",
    )
    return config_option(f)
python
def options(f): """ Shared options, used by all bartender commands """ f = click.option('--config', envvar='VODKA_HOME', default=click.get_app_dir('vodka'), help="location of config file")(f) return f
[ "def", "options", "(", "f", ")", ":", "f", "=", "click", ".", "option", "(", "'--config'", ",", "envvar", "=", "'VODKA_HOME'", ",", "default", "=", "click", ".", "get_app_dir", "(", "'vodka'", ")", ",", "help", "=", "\"location of config file\"", ")", "(...
Shared options, used by all bartender commands
[ "Shared", "options", "used", "by", "all", "bartender", "commands" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L28-L34
20c/vodka
vodka/bartender.py
check_config
def check_config(config):
    """
    Check and validate configuration attributes, to help administrators
    quickly spot missing required configurations and invalid configuration
    values in general
    """
    # Load the config and wire up logging/apps before validation runs.
    loaded = vodka.config.Config(read=config)
    vodka.log.set_loggers(loaded.get("logging"))
    vodka.app.load_all(loaded)

    click.echo("Checking config at %s for errors ..." % config)
    errors, warnings = vodka.config.InstanceHandler.validate(loaded)
    click.echo("%d config ERRORS, %d config WARNINGS" % (errors, warnings))
python
def check_config(config): """ Check and validate configuration attributes, to help administrators quickly spot missing required configurations and invalid configuration values in general """ cfg = vodka.config.Config(read=config) vodka.log.set_loggers(cfg.get("logging")) vodka.app.load_all(cfg) click.echo("Checking config at %s for errors ..." % config) num_crit, num_warn = vodka.config.InstanceHandler.validate(cfg) click.echo("%d config ERRORS, %d config WARNINGS" % (num_crit, num_warn))
[ "def", "check_config", "(", "config", ")", ":", "cfg", "=", "vodka", ".", "config", ".", "Config", "(", "read", "=", "config", ")", "vodka", ".", "log", ".", "set_loggers", "(", "cfg", ".", "get", "(", "\"logging\"", ")", ")", "vodka", ".", "app", ...
Check and validate configuration attributes, to help administrators quickly spot missing required configurations and invalid configuration values in general
[ "Check", "and", "validate", "configuration", "attributes", "to", "help", "administrators", "quickly", "spot", "missing", "required", "configurations", "and", "invalid", "configuration", "values", "in", "general" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L44-L61
20c/vodka
vodka/bartender.py
config
def config(config, skip_defaults):
    """
    Generates configuration file from config specifications
    """
    configurator = ClickConfigurator(vodka.plugin, skip_defaults=skip_defaults)
    configurator.configure(vodka.config.instance, vodka.config.InstanceHandler)

    # `config` may be a full target url/path; if it is a bare directory,
    # parse_url raises ValueError and we fall back to config.yaml inside it.
    try:
        dst = munge_config.parse_url(config)
    except ValueError:
        config = os.path.join(config, "config.yaml")
        dst = munge_config.parse_url(config)

    config_dir = os.path.dirname(config)
    if config_dir and not os.path.exists(config_dir):
        os.makedirs(config_dir)

    dst.cls().dumpu(vodka.config.instance, dst.url.path)

    # Some required values cannot be prompted for; list them so the admin
    # can finish the config by hand.
    if configurator.action_required:
        click.echo("")
        click.echo("not all required values could be set by this script, please manually edit the config and set the following values")
        click.echo("")
        for item in configurator.action_required:
            click.echo("- %s" % item)
        click.echo("")

    click.echo("Config written to %s" % dst.url.path)
python
def config(config, skip_defaults): """ Generates configuration file from config specifications """ configurator = ClickConfigurator( vodka.plugin, skip_defaults=skip_defaults ) configurator.configure(vodka.config.instance, vodka.config.InstanceHandler) try: dst = munge_config.parse_url(config) except ValueError: config = os.path.join(config, "config.yaml") dst = munge_config.parse_url(config) config_dir = os.path.dirname(config) if not os.path.exists(config_dir) and config_dir: os.makedirs(config_dir) dst.cls().dumpu(vodka.config.instance, dst.url.path) if configurator.action_required: click.echo("") click.echo("not all required values could be set by this script, please manually edit the config and set the following values") click.echo("") for item in configurator.action_required: click.echo("- %s" % item) click.echo("") click.echo("Config written to %s" % dst.url.path)
[ "def", "config", "(", "config", ",", "skip_defaults", ")", ":", "configurator", "=", "ClickConfigurator", "(", "vodka", ".", "plugin", ",", "skip_defaults", "=", "skip_defaults", ")", "configurator", ".", "configure", "(", "vodka", ".", "config", ".", "instanc...
Generates configuration file from config specifications
[ "Generates", "configuration", "file", "from", "config", "specifications" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L67-L99
20c/vodka
vodka/bartender.py
newapp
def newapp(path):
    """
    Generates all files for a new vodka app at the specified location.

    Will generate to current directory if no path is specified

    Raises:
        click.ClickException: if a vodka app already exists at `path`.
    """
    app_path = os.path.join(VODKA_INSTALL_DIR, "resources", "blank_app")

    if not os.path.exists(path):
        os.makedirs(path)
    elif os.path.exists(os.path.join(path, "application.py")):
        # Bug fix: click has no `click.error` attribute, so the original code
        # raised AttributeError here — and even if it had printed, it fell
        # through and overwrote the existing app. ClickException reports the
        # error and aborts before any files are touched.
        raise click.ClickException(
            "There already exists a vodka app at %s, please specify a different path" % path)

    os.makedirs(os.path.join(path, "plugins"))

    shutil.copy(os.path.join(app_path, "application.py"),
                os.path.join(path, "application.py"))
    shutil.copy(os.path.join(app_path, "__init__.py"),
                os.path.join(path, "__init__.py"))
    shutil.copy(os.path.join(app_path, "plugins", "example.py"),
                os.path.join(path, "plugins", "example.py"))
    shutil.copy(os.path.join(app_path, "plugins", "__init__.py"),
                os.path.join(path, "plugins", "__init__.py"))
python
def newapp(path): """ Generates all files for a new vodka app at the specified location. Will generate to current directory if no path is specified """ app_path = os.path.join(VODKA_INSTALL_DIR, "resources", "blank_app") if not os.path.exists(path): os.makedirs(path) elif os.path.exists(os.path.join(path, "application.py")): click.error("There already exists a vodka app at %s, please specify a different path" % path) os.makedirs(os.path.join(path, "plugins")) shutil.copy(os.path.join(app_path, "application.py"), os.path.join(path, "application.py")) shutil.copy(os.path.join(app_path, "__init__.py"), os.path.join(path, "__init__.py")) shutil.copy(os.path.join(app_path, "plugins", "example.py"), os.path.join(path, "plugins", "example.py")) shutil.copy(os.path.join(app_path, "plugins", "__init__.py"), os.path.join(path, "plugins", "__init__.py"))
[ "def", "newapp", "(", "path", ")", ":", "app_path", "=", "os", ".", "path", ".", "join", "(", "VODKA_INSTALL_DIR", ",", "\"resources\"", ",", "\"blank_app\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "make...
Generates all files for a new vodka app at the specified location. Will generate to current directory if no path is specified
[ "Generates", "all", "files", "for", "a", "new", "vodka", "app", "at", "the", "specified", "location", "." ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L105-L121
20c/vodka
vodka/bartender.py
serve
def serve(config):
    """
    Serves (runs) the vodka application
    """
    app_config = vodka.config.Config(read=config)
    vodka.run(app_config, app_config)
python
def serve(config): """ Serves (runs) the vodka application """ cfg = vodka.config.Config(read=config) vodka.run(cfg, cfg)
[ "def", "serve", "(", "config", ")", ":", "cfg", "=", "vodka", ".", "config", ".", "Config", "(", "read", "=", "config", ")", "vodka", ".", "run", "(", "cfg", ",", "cfg", ")" ]
Serves (runs) the vodka application
[ "Serves", "(", "runs", ")", "the", "vodka", "application" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/bartender.py#L128-L134
ricardosasilva/pagseguro-python
pagseguro/validators.py
Email
def Email(msg=None):
    '''
    Valida endereços de email

    Returns a validator callable: given a value, returns str(value) if it
    looks like an email address, otherwise raises Invalid(msg or default).
    '''
    # Raw string avoids invalid escape-sequence warnings ('.' and '-' need no
    # escaping inside a class). The pattern now requires a non-empty local
    # part (the old `*` accepted "@x.com") and is end-anchored with \Z so
    # trailing garbage such as "a@b.com junk" is rejected. Compiled once,
    # outside the returned closure.
    pattern = re.compile(r"[\w.\-]+@[\w.\-]+\.\w+\Z")

    def f(v):
        if pattern.match(str(v)):
            return str(v)
        else:
            raise Invalid(msg or ("Endereco de email invalido"))
    return f
python
def Email(msg=None): ''' Valida endereços de email ''' def f(v): if re.match("[\w\.\-]*@[\w\.\-]*\.\w+", str(v)): return str(v) else: raise Invalid(msg or ("Endereco de email invalido")) return f
[ "def", "Email", "(", "msg", "=", "None", ")", ":", "def", "f", "(", "v", ")", ":", "if", "re", ".", "match", "(", "\"[\\w\\.\\-]*@[\\w\\.\\-]*\\.\\w+\"", ",", "str", "(", "v", ")", ")", ":", "return", "str", "(", "v", ")", "else", ":", "raise", "...
Valida endereços de email
[ "Valida", "endereços", "de", "email" ]
train
https://github.com/ricardosasilva/pagseguro-python/blob/8e39d1b0585684c460b86073d1fb3f33112b5b3d/pagseguro/validators.py#L6-L15
dwaiter/django-bcrypt
django_bcrypt/models.py
is_enabled
def is_enabled():
    """Returns ``True`` if bcrypt should be used."""
    if not getattr(settings, "BCRYPT_ENABLED", True):
        return False
    # mail.outbox only exists while Django's test framework is active, so its
    # presence tells us we are running under a test.
    if hasattr(mail, 'outbox'):
        return getattr(settings, "BCRYPT_ENABLED_UNDER_TEST", False)
    return True
python
def is_enabled(): """Returns ``True`` if bcrypt should be used.""" enabled = getattr(settings, "BCRYPT_ENABLED", True) if not enabled: return False # Are we under a test? if hasattr(mail, 'outbox'): return getattr(settings, "BCRYPT_ENABLED_UNDER_TEST", False) return True
[ "def", "is_enabled", "(", ")", ":", "enabled", "=", "getattr", "(", "settings", ",", "\"BCRYPT_ENABLED\"", ",", "True", ")", "if", "not", "enabled", ":", "return", "False", "# Are we under a test?", "if", "hasattr", "(", "mail", ",", "'outbox'", ")", ":", ...
Returns ``True`` if bcrypt should be used.
[ "Returns", "True", "if", "bcrypt", "should", "be", "used", "." ]
train
https://github.com/dwaiter/django-bcrypt/blob/913d86b2ba71334fd54670c6f0050bec02689654/django_bcrypt/models.py#L39-L47
dwaiter/django-bcrypt
django_bcrypt/models.py
bcrypt_check_password
def bcrypt_check_password(self, raw_password):
    """
    Returns a boolean of whether the *raw_password* was correct.

    Attempts to validate with bcrypt, but falls back to Django's
    ``User.check_password()`` if the hash is incorrect.

    If ``BCRYPT_MIGRATE`` is set, attempts to convert sha1 password to bcrypt
    or converts between different bcrypt rounds values.

    .. note::

        In case of a password migration this method calls ``User.save()`` to
        persist the changes.
    """
    pwd_ok = False
    should_change = False
    if self.password.startswith('bc$'):
        # Stored format: 'bc$' + bcrypt salt-and-hash. py-bcrypt's hashpw()
        # re-hashes raw_password with the salt embedded in salt_and_hash, so
        # string equality means the password matches.
        # NOTE(review): this is a plain `==`, not a constant-time comparison
        # (e.g. hmac.compare_digest) — confirm whether timing is a concern.
        salt_and_hash = self.password[3:]
        pwd_ok = bcrypt.hashpw(smart_str(raw_password), salt_and_hash) == salt_and_hash
        if pwd_ok:
            # Re-hash later if the stored cost factor (3rd '$'-field) differs
            # from the currently configured rounds.
            rounds = int(salt_and_hash.split('$')[2])
            should_change = rounds != get_rounds()
    elif _check_password(self, raw_password):
        # Legacy (non-bcrypt) hash validated by Django's original checker;
        # always migrate such hashes.
        pwd_ok = True
        should_change = True

    if pwd_ok and should_change and is_enabled() and migrate_to_bcrypt():
        self.set_password(raw_password)
        salt_and_hash = self.password[3:]
        # NOTE(review): unlike the check above, this assert hashes
        # raw_password without smart_str() — verify with non-ASCII passwords.
        assert bcrypt.hashpw(raw_password, salt_and_hash) == salt_and_hash
        self.save()

    return pwd_ok
python
def bcrypt_check_password(self, raw_password): """ Returns a boolean of whether the *raw_password* was correct. Attempts to validate with bcrypt, but falls back to Django's ``User.check_password()`` if the hash is incorrect. If ``BCRYPT_MIGRATE`` is set, attempts to convert sha1 password to bcrypt or converts between different bcrypt rounds values. .. note:: In case of a password migration this method calls ``User.save()`` to persist the changes. """ pwd_ok = False should_change = False if self.password.startswith('bc$'): salt_and_hash = self.password[3:] pwd_ok = bcrypt.hashpw(smart_str(raw_password), salt_and_hash) == salt_and_hash if pwd_ok: rounds = int(salt_and_hash.split('$')[2]) should_change = rounds != get_rounds() elif _check_password(self, raw_password): pwd_ok = True should_change = True if pwd_ok and should_change and is_enabled() and migrate_to_bcrypt(): self.set_password(raw_password) salt_and_hash = self.password[3:] assert bcrypt.hashpw(raw_password, salt_and_hash) == salt_and_hash self.save() return pwd_ok
[ "def", "bcrypt_check_password", "(", "self", ",", "raw_password", ")", ":", "pwd_ok", "=", "False", "should_change", "=", "False", "if", "self", ".", "password", ".", "startswith", "(", "'bc$'", ")", ":", "salt_and_hash", "=", "self", ".", "password", "[", ...
Returns a boolean of whether the *raw_password* was correct. Attempts to validate with bcrypt, but falls back to Django's ``User.check_password()`` if the hash is incorrect. If ``BCRYPT_MIGRATE`` is set, attempts to convert sha1 password to bcrypt or converts between different bcrypt rounds values. .. note:: In case of a password migration this method calls ``User.save()`` to persist the changes.
[ "Returns", "a", "boolean", "of", "whether", "the", "*", "raw_password", "*", "was", "correct", "." ]
train
https://github.com/dwaiter/django-bcrypt/blob/913d86b2ba71334fd54670c6f0050bec02689654/django_bcrypt/models.py#L55-L88
dwaiter/django-bcrypt
django_bcrypt/models.py
bcrypt_set_password
def bcrypt_set_password(self, raw_password):
    """
    Sets the user's password to *raw_password*, hashed with bcrypt.
    """
    if not is_enabled() or raw_password is None:
        # Bcrypt disabled, or an unusable password requested: defer to
        # Django's original set_password implementation.
        _set_password(self, raw_password)
        return
    # 'bc$' prefix marks the hash as bcrypt for bcrypt_check_password().
    salt = bcrypt.gensalt(get_rounds())
    self.password = 'bc$' + bcrypt.hashpw(smart_str(raw_password), salt)
python
def bcrypt_set_password(self, raw_password): """ Sets the user's password to *raw_password*, hashed with bcrypt. """ if not is_enabled() or raw_password is None: _set_password(self, raw_password) else: salt = bcrypt.gensalt(get_rounds()) self.password = 'bc$' + bcrypt.hashpw(smart_str(raw_password), salt)
[ "def", "bcrypt_set_password", "(", "self", ",", "raw_password", ")", ":", "if", "not", "is_enabled", "(", ")", "or", "raw_password", "is", "None", ":", "_set_password", "(", "self", ",", "raw_password", ")", "else", ":", "salt", "=", "bcrypt", ".", "gensal...
Sets the user's password to *raw_password*, hashed with bcrypt.
[ "Sets", "the", "user", "s", "password", "to", "*", "raw_password", "*", "hashed", "with", "bcrypt", "." ]
train
https://github.com/dwaiter/django-bcrypt/blob/913d86b2ba71334fd54670c6f0050bec02689654/django_bcrypt/models.py#L91-L99
bharadwaj-raju/libdesktop
libdesktop/applications.py
mac_app_exists
def mac_app_exists(app):
    '''Check if 'app' is installed (OS X).

    Check if the given applications is installed on this OS X system.

    Args:
        app (str): The application name.

    Returns:
        bool: Is the app installed or not?
    '''

    # AppleScript exits 0 when Finder can resolve the application id,
    # 1 otherwise.
    APP_CHECK_APPLESCRIPT = '''try
	tell application "Finder"
		set appname to name of application file id "%s"
		return 0
	end tell
	on error err_msg number err_num
		return 1
	end try'''

    # Bug fix: `osascript -e` takes the script *text* as its argument, not a
    # file path — the old code executed the literal string
    # "/tmp/app_check.AppleScript" as AppleScript, so the check could never
    # work. Passing the script inline also removes the race-prone,
    # world-shared /tmp file.
    app_check_proc = sp.Popen(['osascript', '-e', APP_CHECK_APPLESCRIPT % app])

    return app_check_proc.wait() == 0
python
def mac_app_exists(app): '''Check if 'app' is installed (OS X). Check if the given applications is installed on this OS X system. Args: app (str): The application name. Returns: bool: Is the app installed or not? ''' APP_CHECK_APPLESCRIPT = '''try tell application "Finder" set appname to name of application file id "%s" return 0 end tell on error err_msg number err_num return 1 end try''' with open('/tmp/app_check.AppleScript', 'w') as f: f.write(APP_CHECK_APPLESCRIPT % app) app_check_proc = sp.Popen( ['osascript', '-e', '/tmp/app_check.AppleScript']) if app_check_proc.wait() != 0: return False else: return True
[ "def", "mac_app_exists", "(", "app", ")", ":", "APP_CHECK_APPLESCRIPT", "=", "'''try\n\ttell application \"Finder\"\n\t\tset appname to name of application file id \"%s\"\n\t\treturn 0\n\tend tell\n\ton error err_msg number err_num\n\t\treturn 1\n\tend try'''", "with", "open", "(", "'/tmp/ap...
Check if 'app' is installed (OS X). Check if the given application is installed on this OS X system. Args: app (str): The application name. Returns: bool: Is the app installed or not?
[ "Check", "if", "app", "is", "installed", "(", "OS", "X", ")", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/applications.py#L37-L68
bharadwaj-raju/libdesktop
libdesktop/applications.py
open_file_with_default_program
def open_file_with_default_program(file_path, background=False, return_cmd=False):
    '''Opens a file with the default program for that type.

    Open the file with the user's preferred application.

    Args:
        file_path  (str) : Path to the file to be opened.
        background (bool): Run the program in the background, instead of waiting for completion. Defaults to ``False``.
        return_cmd (bool): Returns the command to run the program (str) instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the program is returned instead of running it. Else returns nothing.
    '''

    desktop_env = system.get_name()

    if desktop_env == 'windows':
        # NOTE(review): single quotes are not quoting characters for the
        # Windows shell, and paths containing quotes will break this —
        # confirm behavior on Windows (shlex.quote is POSIX-only).
        open_file_cmd = 'explorer.exe ' + "'%s'" % file_path

    elif desktop_env == 'mac':
        # NOTE(review): same quoting caveat for paths containing "'".
        open_file_cmd = 'open ' + "'%s'" % file_path

    else:
        # XDG resolution chain: file -> mime type -> default .desktop entry,
        # then build the launch command from the located desktop file.
        file_mime_type = system.get_cmd_out(
            ['xdg-mime', 'query', 'filetype', file_path])
        desktop_file = system.get_cmd_out(
            ['xdg-mime', 'query', 'default', file_mime_type])

        open_file_cmd = desktopfile.execute(desktopfile.locate(
            desktop_file)[0], files=[file_path], return_cmd=True)

    if return_cmd:
        return open_file_cmd
    else:
        # shell=True because open_file_cmd is a single command string.
        def_program_proc = sp.Popen(open_file_cmd, shell=True)

        if not background:
            def_program_proc.wait()
python
def open_file_with_default_program(file_path, background=False, return_cmd=False): '''Opens a file with the default program for that type. Open the file with the user's preferred application. Args: file_path (str) : Path to the file to be opened. background (bool): Run the program in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command to run the program (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the program is returned instead of running it. Else returns nothing. ''' desktop_env = system.get_name() if desktop_env == 'windows': open_file_cmd = 'explorer.exe ' + "'%s'" % file_path elif desktop_env == 'mac': open_file_cmd = 'open ' + "'%s'" % file_path else: file_mime_type = system.get_cmd_out( ['xdg-mime', 'query', 'filetype', file_path]) desktop_file = system.get_cmd_out( ['xdg-mime', 'query', 'default', file_mime_type]) open_file_cmd = desktopfile.execute(desktopfile.locate( desktop_file)[0], files=[file_path], return_cmd=True) if return_cmd: return open_file_cmd else: def_program_proc = sp.Popen(open_file_cmd, shell=True) if not background: def_program_proc.wait()
[ "def", "open_file_with_default_program", "(", "file_path", ",", "background", "=", "False", ",", "return_cmd", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "desktop_env", "==", "'windows'", ":", "open_file_cmd", "=", "...
Opens a file with the default program for that type. Open the file with the user's preferred application. Args: file_path (str) : Path to the file to be opened. background (bool): Run the program in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command to run the program (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the program is returned instead of running it. Else returns nothing.
[ "Opens", "a", "file", "with", "the", "default", "program", "for", "that", "type", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/applications.py#L71-L109
bharadwaj-raju/libdesktop
libdesktop/applications.py
terminal
def terminal(exec_='', background=False, shell_after_cmd_exec=False, keep_open_after_cmd_exec=False, return_cmd=False): '''Start the default terminal emulator. Start the user's preferred terminal emulator, optionally running a command in it. **Order of starting** Windows: Powershell Mac: - iTerm2 - Terminal.app Linux/Unix: - ``$TERMINAL`` - ``x-terminal-emulator`` - Terminator - Desktop environment's terminal - gnome-terminal - urxvt - rxvt - xterm Args: exec\_ (str) : An optional command to run in the opened terminal emulator. Defaults to empty (no command). background (bool): Run the terminal in the background, instead of waiting for completion. Defaults to ``False``. shell_after_cmd_exec (bool): Start the user's shell after running the command (see exec_). Defaults to `False`. return_cmd (bool): Returns the command used to start the terminal (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, returns the command to run the terminal instead of running it. Else returns nothing. ''' desktop_env = system.get_name() if not exec_: shell_after_cmd_exec = True if desktop_env == 'windows': terminal_cmd_str = 'start powershell.exe' if desktop_env == 'mac': # Try iTerm2 first, apparently most popular Mac Terminal if mac_app_exists('iTerm2'): terminal_cmd_str = 'open -a iTerm2' else: terminal_cmd_str = 'open -a Terminal' else: # sensible-terminal if os.getenv('TERMINAL'): # Not everywhere, but if user *really* has a preference, they will # set this terminal_cmd_str = os.getenv('TERMINAL') elif system.is_in_path('x-terminal-emulator'): # This is a convenience script that launches terminal based on # user preferences. 
# This is not available on some distros (but most have it) # so try this first terminal_cmd_str = 'x-terminal-emulator' elif system.is_in_path('terminator'): terminal_cmd_str = 'terminator' elif desktop_env in ['gnome', 'unity', 'cinnamon', 'gnome2']: terminal_cmd_str = 'gnome-terminal' elif desktop_env == 'xfce4': terminal_cmd_str = 'xfce4-terminal' elif desktop_env == 'kde' or desktop_env == 'trinity': terminal_cmd_str = 'konsole' elif desktop_env == 'mate': terminal_cmd_str = 'mate-terminal' elif desktop_env == 'i3': terminal_cmd_str = 'i3-sensible-terminal' elif desktop_env == 'pantheon': terminal_cmd_str = 'pantheon-terminal' elif desktop_env == 'enlightenment': terminal_cmd_str = 'terminology' elif desktop_env == 'lxde' or desktop_env == 'lxqt': terminal_cmd_str = 'lxterminal' else: if system.is_in_path('gnome-terminal'): terminal_cmd_str = 'gnome-terminal' elif system.is_in_path('urxvt'): terminal_cmd_str = 'urxvt' elif system.is_in_path('rxvt'): terminal_cmd_str = 'rxvt' elif system.is_in_path('xterm'): terminal_cmd_str = 'xterm' if exec_: if desktop_env == 'windows': if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; pause' if os.path.isfile(exec_): terminal_cmd_str += exec_ else: terminal_cmd_str += ' -Command ' + '"' + exec_ + '"' if shell_after_cmd_exec: terminal_cmd_str += ' -NoExit' else: if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; read' if shell_after_cmd_exec: exec_ += '; ' + os.getenv('SHELL') if desktop_env == 'mac': terminal_cmd_str += ' sh -c {}'.format(shlex.quote(exec_)) else: terminal_cmd_str += ' -e {}'.format( shlex.quote('sh -c {}'.format(shlex.quote(exec_)))) if return_cmd: return terminal_cmd_str terminal_proc = sp.Popen([terminal_cmd_str], shell=True, stdout=sp.PIPE) if not background: # Wait for process to complete terminal_proc.wait()
python
def terminal(exec_='', background=False, shell_after_cmd_exec=False, keep_open_after_cmd_exec=False, return_cmd=False): '''Start the default terminal emulator. Start the user's preferred terminal emulator, optionally running a command in it. **Order of starting** Windows: Powershell Mac: - iTerm2 - Terminal.app Linux/Unix: - ``$TERMINAL`` - ``x-terminal-emulator`` - Terminator - Desktop environment's terminal - gnome-terminal - urxvt - rxvt - xterm Args: exec\_ (str) : An optional command to run in the opened terminal emulator. Defaults to empty (no command). background (bool): Run the terminal in the background, instead of waiting for completion. Defaults to ``False``. shell_after_cmd_exec (bool): Start the user's shell after running the command (see exec_). Defaults to `False`. return_cmd (bool): Returns the command used to start the terminal (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, returns the command to run the terminal instead of running it. Else returns nothing. ''' desktop_env = system.get_name() if not exec_: shell_after_cmd_exec = True if desktop_env == 'windows': terminal_cmd_str = 'start powershell.exe' if desktop_env == 'mac': # Try iTerm2 first, apparently most popular Mac Terminal if mac_app_exists('iTerm2'): terminal_cmd_str = 'open -a iTerm2' else: terminal_cmd_str = 'open -a Terminal' else: # sensible-terminal if os.getenv('TERMINAL'): # Not everywhere, but if user *really* has a preference, they will # set this terminal_cmd_str = os.getenv('TERMINAL') elif system.is_in_path('x-terminal-emulator'): # This is a convenience script that launches terminal based on # user preferences. 
# This is not available on some distros (but most have it) # so try this first terminal_cmd_str = 'x-terminal-emulator' elif system.is_in_path('terminator'): terminal_cmd_str = 'terminator' elif desktop_env in ['gnome', 'unity', 'cinnamon', 'gnome2']: terminal_cmd_str = 'gnome-terminal' elif desktop_env == 'xfce4': terminal_cmd_str = 'xfce4-terminal' elif desktop_env == 'kde' or desktop_env == 'trinity': terminal_cmd_str = 'konsole' elif desktop_env == 'mate': terminal_cmd_str = 'mate-terminal' elif desktop_env == 'i3': terminal_cmd_str = 'i3-sensible-terminal' elif desktop_env == 'pantheon': terminal_cmd_str = 'pantheon-terminal' elif desktop_env == 'enlightenment': terminal_cmd_str = 'terminology' elif desktop_env == 'lxde' or desktop_env == 'lxqt': terminal_cmd_str = 'lxterminal' else: if system.is_in_path('gnome-terminal'): terminal_cmd_str = 'gnome-terminal' elif system.is_in_path('urxvt'): terminal_cmd_str = 'urxvt' elif system.is_in_path('rxvt'): terminal_cmd_str = 'rxvt' elif system.is_in_path('xterm'): terminal_cmd_str = 'xterm' if exec_: if desktop_env == 'windows': if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; pause' if os.path.isfile(exec_): terminal_cmd_str += exec_ else: terminal_cmd_str += ' -Command ' + '"' + exec_ + '"' if shell_after_cmd_exec: terminal_cmd_str += ' -NoExit' else: if keep_open_after_cmd_exec and not shell_after_cmd_exec: exec_ += '; read' if shell_after_cmd_exec: exec_ += '; ' + os.getenv('SHELL') if desktop_env == 'mac': terminal_cmd_str += ' sh -c {}'.format(shlex.quote(exec_)) else: terminal_cmd_str += ' -e {}'.format( shlex.quote('sh -c {}'.format(shlex.quote(exec_)))) if return_cmd: return terminal_cmd_str terminal_proc = sp.Popen([terminal_cmd_str], shell=True, stdout=sp.PIPE) if not background: # Wait for process to complete terminal_proc.wait()
[ "def", "terminal", "(", "exec_", "=", "''", ",", "background", "=", "False", ",", "shell_after_cmd_exec", "=", "False", ",", "keep_open_after_cmd_exec", "=", "False", ",", "return_cmd", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "...
Start the default terminal emulator. Start the user's preferred terminal emulator, optionally running a command in it. **Order of starting** Windows: Powershell Mac: - iTerm2 - Terminal.app Linux/Unix: - ``$TERMINAL`` - ``x-terminal-emulator`` - Terminator - Desktop environment's terminal - gnome-terminal - urxvt - rxvt - xterm Args: exec\_ (str) : An optional command to run in the opened terminal emulator. Defaults to empty (no command). background (bool): Run the terminal in the background, instead of waiting for completion. Defaults to ``False``. shell_after_cmd_exec (bool): Start the user's shell after running the command (see exec_). Defaults to `False`. return_cmd (bool): Returns the command used to start the terminal (str) instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, returns the command to run the terminal instead of running it. Else returns nothing.
[ "Start", "the", "default", "terminal", "emulator", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/applications.py#L112-L253
bharadwaj-raju/libdesktop
libdesktop/applications.py
text_editor
def text_editor(file='', background=False, return_cmd=False): '''Starts the default graphical text editor. Start the user's preferred graphical text editor, optionally with a file. Args: file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file). background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing. ''' desktop_env = system.get_name() if desktop_env == 'windows': editor_cmd_str = system.get_cmd_out( ['ftype', 'textfile']).split('=', 1)[1] elif desktop_env == 'mac': editor_cmd_str = 'open -a' + system.get_cmd_out( ['def', 'read', 'com.apple.LaunchServices', 'LSHandlers' '-array' '{LSHandlerContentType=public.plain-text;}'] ) else: # Use def handler for MIME-type text/plain editor_cmd_str = system.get_cmd_out( ['xdg-mime', 'query', 'default', 'text/plain']) if '\n' in editor_cmd_str: # Sometimes locate returns multiple results # use first one editor_cmd_str = editor_cmd_str.split('\n')[0] if editor_cmd_str.endswith('.desktop'): # We don't use desktopfile.execute() in order to have working # return_cmd and background editor_cmd_str = desktopfile.parse( desktopfile.locate(editor_cmd_str)[0])['Exec'] for i in editor_cmd_str.split(): if i.startswith('%'): # %-style formatters editor_cmd_str = editor_cmd_str.replace(i, '') if i == '--new-document': # Gedit editor_cmd_str = editor_cmd_str.replace(i, '') if file: editor_cmd_str += ' {}'.format(shlex.quote(file)) if return_cmd: return editor_cmd_str text_editor_proc = sp.Popen([editor_cmd_str], shell=True) if not background: text_editor_proc.wait()
python
def text_editor(file='', background=False, return_cmd=False): '''Starts the default graphical text editor. Start the user's preferred graphical text editor, optionally with a file. Args: file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file). background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing. ''' desktop_env = system.get_name() if desktop_env == 'windows': editor_cmd_str = system.get_cmd_out( ['ftype', 'textfile']).split('=', 1)[1] elif desktop_env == 'mac': editor_cmd_str = 'open -a' + system.get_cmd_out( ['def', 'read', 'com.apple.LaunchServices', 'LSHandlers' '-array' '{LSHandlerContentType=public.plain-text;}'] ) else: # Use def handler for MIME-type text/plain editor_cmd_str = system.get_cmd_out( ['xdg-mime', 'query', 'default', 'text/plain']) if '\n' in editor_cmd_str: # Sometimes locate returns multiple results # use first one editor_cmd_str = editor_cmd_str.split('\n')[0] if editor_cmd_str.endswith('.desktop'): # We don't use desktopfile.execute() in order to have working # return_cmd and background editor_cmd_str = desktopfile.parse( desktopfile.locate(editor_cmd_str)[0])['Exec'] for i in editor_cmd_str.split(): if i.startswith('%'): # %-style formatters editor_cmd_str = editor_cmd_str.replace(i, '') if i == '--new-document': # Gedit editor_cmd_str = editor_cmd_str.replace(i, '') if file: editor_cmd_str += ' {}'.format(shlex.quote(file)) if return_cmd: return editor_cmd_str text_editor_proc = sp.Popen([editor_cmd_str], shell=True) if not background: text_editor_proc.wait()
[ "def", "text_editor", "(", "file", "=", "''", ",", "background", "=", "False", ",", "return_cmd", "=", "False", ")", ":", "desktop_env", "=", "system", ".", "get_name", "(", ")", "if", "desktop_env", "==", "'windows'", ":", "editor_cmd_str", "=", "system",...
Starts the default graphical text editor. Start the user's preferred graphical text editor, optionally with a file. Args: file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file). background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
[ "Starts", "the", "default", "graphical", "text", "editor", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/applications.py#L256-L322
LIVVkit/LIVVkit
livvkit/scheduler.py
run
def run(run_type, module, config): """ Collects the analyses cases to be run and launches processes for each of them. Args: run_type: A string representation of the run type (eg. verification) module: The module corresponding to the run. Must have a run_suite function config: The configuration for the module """ print(" -----------------------------------------------------------------") print(" Beginning " + run_type.lower() + " test suite ") print(" -----------------------------------------------------------------") print("") summary = run_quiet(module, config) print(" -----------------------------------------------------------------") print(" " + run_type.capitalize() + " test suite complete ") print(" -----------------------------------------------------------------") print("") return summary
python
def run(run_type, module, config): """ Collects the analyses cases to be run and launches processes for each of them. Args: run_type: A string representation of the run type (eg. verification) module: The module corresponding to the run. Must have a run_suite function config: The configuration for the module """ print(" -----------------------------------------------------------------") print(" Beginning " + run_type.lower() + " test suite ") print(" -----------------------------------------------------------------") print("") summary = run_quiet(module, config) print(" -----------------------------------------------------------------") print(" " + run_type.capitalize() + " test suite complete ") print(" -----------------------------------------------------------------") print("") return summary
[ "def", "run", "(", "run_type", ",", "module", ",", "config", ")", ":", "print", "(", "\" -----------------------------------------------------------------\"", ")", "print", "(", "\" Beginning \"", "+", "run_type", ".", "lower", "(", ")", "+", "\" test suite \"", "...
Collects the analyses cases to be run and launches processes for each of them. Args: run_type: A string representation of the run type (eg. verification) module: The module corresponding to the run. Must have a run_suite function config: The configuration for the module
[ "Collects", "the", "analyses", "cases", "to", "be", "run", "and", "launches", "processes", "for", "each", "of", "them", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/scheduler.py#L40-L59
LIVVkit/LIVVkit
livvkit/scheduler.py
launch_processes
def launch_processes(tests, run_module, group=True, **config): """ Helper method to launch processes and sync output """ manager = multiprocessing.Manager() test_summaries = manager.dict() process_handles = [multiprocessing.Process(target=run_module.run_suite, args=(test, config[test], test_summaries)) for test in tests] for p in process_handles: p.start() for p in process_handles: p.join() if group: summary = run_module.populate_metadata(tests[0], config[tests[0]]) summary["Data"] = dict(test_summaries) return summary else: test_summaries = dict(test_summaries) summary = [] for ii, test in enumerate(tests): summary.append(run_module.populate_metadata(test, config[test])) if summary[ii]: summary[ii]['Data'] = {test: test_summaries[test]} return summary
python
def launch_processes(tests, run_module, group=True, **config): """ Helper method to launch processes and sync output """ manager = multiprocessing.Manager() test_summaries = manager.dict() process_handles = [multiprocessing.Process(target=run_module.run_suite, args=(test, config[test], test_summaries)) for test in tests] for p in process_handles: p.start() for p in process_handles: p.join() if group: summary = run_module.populate_metadata(tests[0], config[tests[0]]) summary["Data"] = dict(test_summaries) return summary else: test_summaries = dict(test_summaries) summary = [] for ii, test in enumerate(tests): summary.append(run_module.populate_metadata(test, config[test])) if summary[ii]: summary[ii]['Data'] = {test: test_summaries[test]} return summary
[ "def", "launch_processes", "(", "tests", ",", "run_module", ",", "group", "=", "True", ",", "*", "*", "config", ")", ":", "manager", "=", "multiprocessing", ".", "Manager", "(", ")", "test_summaries", "=", "manager", ".", "dict", "(", ")", "process_handles...
Helper method to launch processes and sync output
[ "Helper", "method", "to", "launch", "processes", "and", "sync", "output" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/scheduler.py#L68-L90
lordmauve/lepton
examples/generate.py
on_resize
def on_resize(width, height): """Setup 3D projection""" glViewport(0, 0, width, height) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(30, 1.0*width/height, 0.1, 1000.0) glMatrixMode(GL_MODELVIEW) glLoadIdentity()
python
def on_resize(width, height): """Setup 3D projection""" glViewport(0, 0, width, height) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(30, 1.0*width/height, 0.1, 1000.0) glMatrixMode(GL_MODELVIEW) glLoadIdentity()
[ "def", "on_resize", "(", "width", ",", "height", ")", ":", "glViewport", "(", "0", ",", "0", ",", "width", ",", "height", ")", "glMatrixMode", "(", "GL_PROJECTION", ")", "glLoadIdentity", "(", ")", "gluPerspective", "(", "30", ",", "1.0", "*", "width", ...
Setup 3D projection
[ "Setup", "3D", "projection" ]
train
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/generate.py#L32-L39
Sanji-IO/sanji
sanji/core.py
Route
def Route(resource=None, methods=["get", "post", "put", "delete"], schema=None): """ route """ def _route(func): def wrapper(self, *args, **kwargs): # "test" argument means no wrap func this time, # return original func immediately. if kwargs.get("test", False): kwargs.pop("test") func(self, *args, **kwargs) _methods = methods if isinstance(methods, str): _methods = [methods] route = self.router.route(resource) for method in _methods: getattr(route, method)(func, schema) # Ordered by declare sequence # http://stackoverflow.com/questions/4459531/how-to-read-class-attributes-in-the-same-order-as-declared f_locals = sys._getframe(1).f_locals _order = len([v for v in f_locals.itervalues() if hasattr(v, '__call__') and hasattr(v, '__name__') and v.__name__ == "wrapper"]) wrapper.__dict__["_order"] = _order return wrapper return _route
python
def Route(resource=None, methods=["get", "post", "put", "delete"], schema=None): """ route """ def _route(func): def wrapper(self, *args, **kwargs): # "test" argument means no wrap func this time, # return original func immediately. if kwargs.get("test", False): kwargs.pop("test") func(self, *args, **kwargs) _methods = methods if isinstance(methods, str): _methods = [methods] route = self.router.route(resource) for method in _methods: getattr(route, method)(func, schema) # Ordered by declare sequence # http://stackoverflow.com/questions/4459531/how-to-read-class-attributes-in-the-same-order-as-declared f_locals = sys._getframe(1).f_locals _order = len([v for v in f_locals.itervalues() if hasattr(v, '__call__') and hasattr(v, '__name__') and v.__name__ == "wrapper"]) wrapper.__dict__["_order"] = _order return wrapper return _route
[ "def", "Route", "(", "resource", "=", "None", ",", "methods", "=", "[", "\"get\"", ",", "\"post\"", ",", "\"put\"", ",", "\"delete\"", "]", ",", "schema", "=", "None", ")", ":", "def", "_route", "(", "func", ")", ":", "def", "wrapper", "(", "self", ...
route
[ "route" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L441-L469
Sanji-IO/sanji
sanji/core.py
Sanji._register_routes
def _register_routes(self, methods): """ _register_routes """ # setup routes by decorator methods = [(n, v) for (n, v) in methods if v.__name__ == "wrapper"] methods = sorted(methods, key=lambda x: x[1]._order) for name, value in methods: value() # execute setting route return methods
python
def _register_routes(self, methods): """ _register_routes """ # setup routes by decorator methods = [(n, v) for (n, v) in methods if v.__name__ == "wrapper"] methods = sorted(methods, key=lambda x: x[1]._order) for name, value in methods: value() # execute setting route return methods
[ "def", "_register_routes", "(", "self", ",", "methods", ")", ":", "# setup routes by decorator", "methods", "=", "[", "(", "n", ",", "v", ")", "for", "(", "n", ",", "v", ")", "in", "methods", "if", "v", ".", "__name__", "==", "\"wrapper\"", "]", "metho...
_register_routes
[ "_register_routes" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L100-L110
Sanji-IO/sanji
sanji/core.py
Sanji._dispatch_message
def _dispatch_message(self): """ _dispatch_message """ while True: message = self.req_queue.get() if message is None: _logger.debug("_dispatch_message thread is terminated") return if message._type != MessageType.EVENT: self.__dispatch_message(message) elif message._type == MessageType.EVENT: self.__dispatch_event_message(message)
python
def _dispatch_message(self): """ _dispatch_message """ while True: message = self.req_queue.get() if message is None: _logger.debug("_dispatch_message thread is terminated") return if message._type != MessageType.EVENT: self.__dispatch_message(message) elif message._type == MessageType.EVENT: self.__dispatch_event_message(message)
[ "def", "_dispatch_message", "(", "self", ")", ":", "while", "True", ":", "message", "=", "self", ".", "req_queue", ".", "get", "(", ")", "if", "message", "is", "None", ":", "_logger", ".", "debug", "(", "\"_dispatch_message thread is terminated\"", ")", "ret...
_dispatch_message
[ "_dispatch_message" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L112-L125
Sanji-IO/sanji
sanji/core.py
Sanji._resolve_responses
def _resolve_responses(self): """ _resolve_responses """ while True: message = self.res_queue.get() if message is None: _logger.debug("_resolve_responses thread is terminated") return self.__resolve_responses(message)
python
def _resolve_responses(self): """ _resolve_responses """ while True: message = self.res_queue.get() if message is None: _logger.debug("_resolve_responses thread is terminated") return self.__resolve_responses(message)
[ "def", "_resolve_responses", "(", "self", ")", ":", "while", "True", ":", "message", "=", "self", ".", "res_queue", ".", "get", "(", ")", "if", "message", "is", "None", ":", "_logger", ".", "debug", "(", "\"_resolve_responses thread is terminated\"", ")", "r...
_resolve_responses
[ "_resolve_responses" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L186-L195
Sanji-IO/sanji
sanji/core.py
Sanji.start
def start(self): """ start """ def main_thread(): # create resp, req thread pool self._create_thread_pool() # start connection, this will block until stop() self.conn_thread = Thread(target=self._conn.connect) self.conn_thread.daemon = True self.conn_thread.start() # register model to controller... self.is_ready.wait() if hasattr(self, 'run'): _logger.debug("Start running...") self.run() # start main_thread self.main_thread = Thread(target=main_thread) self.main_thread.daemon = True self.main_thread.start() if threading.current_thread().__class__.__name__ == '_MainThread': # control this bundle stop or not while not self.stop_event.wait(1): sleep(1) else: self.stop_event.wait() self.stop() _logger.debug("Shutdown successfully")
python
def start(self): """ start """ def main_thread(): # create resp, req thread pool self._create_thread_pool() # start connection, this will block until stop() self.conn_thread = Thread(target=self._conn.connect) self.conn_thread.daemon = True self.conn_thread.start() # register model to controller... self.is_ready.wait() if hasattr(self, 'run'): _logger.debug("Start running...") self.run() # start main_thread self.main_thread = Thread(target=main_thread) self.main_thread.daemon = True self.main_thread.start() if threading.current_thread().__class__.__name__ == '_MainThread': # control this bundle stop or not while not self.stop_event.wait(1): sleep(1) else: self.stop_event.wait() self.stop() _logger.debug("Shutdown successfully")
[ "def", "start", "(", "self", ")", ":", "def", "main_thread", "(", ")", ":", "# create resp, req thread pool", "self", ".", "_create_thread_pool", "(", ")", "# start connection, this will block until stop()", "self", ".", "conn_thread", "=", "Thread", "(", "target", ...
start
[ "start" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L230-L262
Sanji-IO/sanji
sanji/core.py
Sanji.stop
def stop(self, *args, **kwargs): """ exit """ _logger.debug("Bundle [%s] has been shutting down" % self.bundle.profile["name"]) if hasattr(self, 'before_stop') and \ hasattr(self.before_stop, '__call__'): _logger.debug("Invoking before_stop...") self.before_stop() self._conn.disconnect() self._session.stop() self.stop_event.set() # TODO: shutdown all threads for thread, stop in self.thread_list: stop() for thread, stop in self.thread_list: thread.join() self.is_ready.clear()
python
def stop(self, *args, **kwargs): """ exit """ _logger.debug("Bundle [%s] has been shutting down" % self.bundle.profile["name"]) if hasattr(self, 'before_stop') and \ hasattr(self.before_stop, '__call__'): _logger.debug("Invoking before_stop...") self.before_stop() self._conn.disconnect() self._session.stop() self.stop_event.set() # TODO: shutdown all threads for thread, stop in self.thread_list: stop() for thread, stop in self.thread_list: thread.join() self.is_ready.clear()
[ "def", "stop", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_logger", ".", "debug", "(", "\"Bundle [%s] has been shutting down\"", "%", "self", ".", "bundle", ".", "profile", "[", "\"name\"", "]", ")", "if", "hasattr", "(", "self", ...
exit
[ "exit" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L271-L292
Sanji-IO/sanji
sanji/core.py
Sanji.on_sanji_message
def on_sanji_message(self, client, userdata, msg): """This function will recevie all message from mqtt client the client instance for this callback userdata the private user data as set in Client() or userdata_set() message an instance of MQTTMessage. This is a class with members topic, payload, qos, retain. """ try: message = Message(msg.payload) except (TypeError, ValueError) as e: _logger.error(e, exc_info=True) return if message.type() == MessageType.UNKNOWN: _logger.debug("Got an UNKNOWN message, don't dispatch") return if message.type() == MessageType.RESPONSE: self.res_queue.put(message) if message.type() == MessageType.REQUEST or \ message.type() == MessageType.DIRECT or \ message.type() == MessageType.HOOK or \ message.type() == MessageType.EVENT: self.req_queue.put(message)
python
def on_sanji_message(self, client, userdata, msg): """This function will recevie all message from mqtt client the client instance for this callback userdata the private user data as set in Client() or userdata_set() message an instance of MQTTMessage. This is a class with members topic, payload, qos, retain. """ try: message = Message(msg.payload) except (TypeError, ValueError) as e: _logger.error(e, exc_info=True) return if message.type() == MessageType.UNKNOWN: _logger.debug("Got an UNKNOWN message, don't dispatch") return if message.type() == MessageType.RESPONSE: self.res_queue.put(message) if message.type() == MessageType.REQUEST or \ message.type() == MessageType.DIRECT or \ message.type() == MessageType.HOOK or \ message.type() == MessageType.EVENT: self.req_queue.put(message)
[ "def", "on_sanji_message", "(", "self", ",", "client", ",", "userdata", ",", "msg", ")", ":", "try", ":", "message", "=", "Message", "(", "msg", ".", "payload", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "e", ":", "_logger", ".", ...
This function will recevie all message from mqtt client the client instance for this callback userdata the private user data as set in Client() or userdata_set() message an instance of MQTTMessage. This is a class with members topic, payload, qos, retain.
[ "This", "function", "will", "recevie", "all", "message", "from", "mqtt", "client", "the", "client", "instance", "for", "this", "callback", "userdata", "the", "private", "user", "data", "as", "set", "in", "Client", "()", "or", "userdata_set", "()", "message", ...
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L294-L321
Sanji-IO/sanji
sanji/core.py
Sanji.on_connect
def on_connect(self, client, userdata, flags, rc): """ on_connect(self, client, obj, flags, rc): client the client instance for this callback userdata the private user data as set in Client() or userdata_set() flags response flags sent by the broker rc the connection result """ _logger.debug("Connection established with result code %s" % rc) if self.reg_thread is not None and self.reg_thread.is_alive(): _logger.debug("Joining previous reg_thread") self.reg_thread.join() def reg(): delay = None if hasattr(self.reg_delay, '__call__'): delay = self.reg_delay() else: delay = self.reg_delay sleep(delay) self._conn.set_tunnels(self._conn.tunnels) model_profile = self.get_profile("model") view_profile = self.get_profile("view") self.deregister(model_profile) self.deregister(view_profile) self.register(model_profile) self.register(view_profile) self.is_ready.set() self.reg_thread = Thread(target=reg) self.reg_thread.daemon = True self.reg_thread.start()
python
def on_connect(self, client, userdata, flags, rc): """ on_connect(self, client, obj, flags, rc): client the client instance for this callback userdata the private user data as set in Client() or userdata_set() flags response flags sent by the broker rc the connection result """ _logger.debug("Connection established with result code %s" % rc) if self.reg_thread is not None and self.reg_thread.is_alive(): _logger.debug("Joining previous reg_thread") self.reg_thread.join() def reg(): delay = None if hasattr(self.reg_delay, '__call__'): delay = self.reg_delay() else: delay = self.reg_delay sleep(delay) self._conn.set_tunnels(self._conn.tunnels) model_profile = self.get_profile("model") view_profile = self.get_profile("view") self.deregister(model_profile) self.deregister(view_profile) self.register(model_profile) self.register(view_profile) self.is_ready.set() self.reg_thread = Thread(target=reg) self.reg_thread.daemon = True self.reg_thread.start()
[ "def", "on_connect", "(", "self", ",", "client", ",", "userdata", ",", "flags", ",", "rc", ")", ":", "_logger", ".", "debug", "(", "\"Connection established with result code %s\"", "%", "rc", ")", "if", "self", ".", "reg_thread", "is", "not", "None", "and", ...
on_connect(self, client, obj, flags, rc): client the client instance for this callback userdata the private user data as set in Client() or userdata_set() flags response flags sent by the broker rc the connection result
[ "on_connect", "(", "self", "client", "obj", "flags", "rc", ")", ":", "client", "the", "client", "instance", "for", "this", "callback", "userdata", "the", "private", "user", "data", "as", "set", "in", "Client", "()", "or", "userdata_set", "()", "flags", "re...
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L323-L360
Sanji-IO/sanji
sanji/core.py
Sanji.register
def register(self, reg_data, retry=True, interval=1, timeout=3): """ register function retry True, infinity retries False, no retries Number, retries times interval time period for retry return False if no success Tunnel if success """ if len(reg_data["resources"]) == 0: _logger.debug("%s no need to register due to no resources" % (reg_data["name"])) return def _register(): try: resp = self.publish.direct.post( "/controller/registration", reg_data) if resp.code == 200: return resp except TimeoutError: _logger.debug("Register message is timeout") return False resp = _register() while resp is False: _logger.debug("Register failed.") self.deregister(reg_data) resp = _register() if resp is None: _logger.error("Can\'t not register to controller") self.stop() return False self._conn.set_tunnel( reg_data["role"], resp.data["tunnel"], self.on_sanji_message) self.bundle.profile["currentTunnels"] = [ tunnel for tunnel, callback in self._conn.tunnels.items()] self.bundle.profile["regCount"] = \ self.bundle.profile.get("reg_count", 0) + 1 _logger.debug("Register successfully %s tunnel: %s" % (reg_data["name"], resp.data["tunnel"],))
python
def register(self, reg_data, retry=True, interval=1, timeout=3): """ register function retry True, infinity retries False, no retries Number, retries times interval time period for retry return False if no success Tunnel if success """ if len(reg_data["resources"]) == 0: _logger.debug("%s no need to register due to no resources" % (reg_data["name"])) return def _register(): try: resp = self.publish.direct.post( "/controller/registration", reg_data) if resp.code == 200: return resp except TimeoutError: _logger.debug("Register message is timeout") return False resp = _register() while resp is False: _logger.debug("Register failed.") self.deregister(reg_data) resp = _register() if resp is None: _logger.error("Can\'t not register to controller") self.stop() return False self._conn.set_tunnel( reg_data["role"], resp.data["tunnel"], self.on_sanji_message) self.bundle.profile["currentTunnels"] = [ tunnel for tunnel, callback in self._conn.tunnels.items()] self.bundle.profile["regCount"] = \ self.bundle.profile.get("reg_count", 0) + 1 _logger.debug("Register successfully %s tunnel: %s" % (reg_data["name"], resp.data["tunnel"],))
[ "def", "register", "(", "self", ",", "reg_data", ",", "retry", "=", "True", ",", "interval", "=", "1", ",", "timeout", "=", "3", ")", ":", "if", "len", "(", "reg_data", "[", "\"resources\"", "]", ")", "==", "0", ":", "_logger", ".", "debug", "(", ...
register function retry True, infinity retries False, no retries Number, retries times interval time period for retry return False if no success Tunnel if success
[ "register", "function", "retry", "True", "infinity", "retries", "False", "no", "retries", "Number", "retries", "times", "interval", "time", "period", "for", "retry", "return", "False", "if", "no", "success", "Tunnel", "if", "success" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L362-L410
Sanji-IO/sanji
sanji/core.py
Sanji.deregister
def deregister(self, reg_data, retry=True, interval=1, timeout=3): """ Deregister model/view of this bundle """ Retry(target=self.publish.direct.delete, args=("/controller/registration", reg_data,), kwargs={"timeout": timeout}, options={"retry": retry, "interval": interval}) _logger.debug("Deregister successfully %s tunnel: %s" % (reg_data["name"], self._conn.tunnels[reg_data["role"]][0],))
python
def deregister(self, reg_data, retry=True, interval=1, timeout=3): """ Deregister model/view of this bundle """ Retry(target=self.publish.direct.delete, args=("/controller/registration", reg_data,), kwargs={"timeout": timeout}, options={"retry": retry, "interval": interval}) _logger.debug("Deregister successfully %s tunnel: %s" % (reg_data["name"], self._conn.tunnels[reg_data["role"]][0],))
[ "def", "deregister", "(", "self", ",", "reg_data", ",", "retry", "=", "True", ",", "interval", "=", "1", ",", "timeout", "=", "3", ")", ":", "Retry", "(", "target", "=", "self", ".", "publish", ".", "direct", ".", "delete", ",", "args", "=", "(", ...
Deregister model/view of this bundle
[ "Deregister", "model", "/", "view", "of", "this", "bundle" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L412-L422
jneight/django-xadmin-extras
xadmin_extras/ext/mailfactory/__init__.py
MailListView.get_context
def get_context(self): """Add mails to the context """ context = super(MailListView, self).get_context() mail_list = registered_mails_names() context['mail_map'] = mail_list return context
python
def get_context(self): """Add mails to the context """ context = super(MailListView, self).get_context() mail_list = registered_mails_names() context['mail_map'] = mail_list return context
[ "def", "get_context", "(", "self", ")", ":", "context", "=", "super", "(", "MailListView", ",", "self", ")", ".", "get_context", "(", ")", "mail_list", "=", "registered_mails_names", "(", ")", "context", "[", "'mail_map'", "]", "=", "mail_list", "return", ...
Add mails to the context
[ "Add", "mails", "to", "the", "context" ]
train
https://github.com/jneight/django-xadmin-extras/blob/a7909a3a4c1620b550202d3f0aa357503cc15a29/xadmin_extras/ext/mailfactory/__init__.py#L30-L38
malexer/s3io
s3io.py
url_split
def url_split(s3_url): """Split S3 URL and return a tuple of (bucket, key). S3 URL is expected to be of "s3://<bucket>/<key>" format. """ assert isinstance(s3_url, str) match = re_s3_url.match(s3_url) if not match: raise UrlParseError('Error parsing S3 URL: "%s"' % s3_url) parts = match.groupdict() return (parts['bucket'], parts['key'])
python
def url_split(s3_url): """Split S3 URL and return a tuple of (bucket, key). S3 URL is expected to be of "s3://<bucket>/<key>" format. """ assert isinstance(s3_url, str) match = re_s3_url.match(s3_url) if not match: raise UrlParseError('Error parsing S3 URL: "%s"' % s3_url) parts = match.groupdict() return (parts['bucket'], parts['key'])
[ "def", "url_split", "(", "s3_url", ")", ":", "assert", "isinstance", "(", "s3_url", ",", "str", ")", "match", "=", "re_s3_url", ".", "match", "(", "s3_url", ")", "if", "not", "match", ":", "raise", "UrlParseError", "(", "'Error parsing S3 URL: \"%s\"'", "%",...
Split S3 URL and return a tuple of (bucket, key). S3 URL is expected to be of "s3://<bucket>/<key>" format.
[ "Split", "S3", "URL", "and", "return", "a", "tuple", "of", "(", "bucket", "key", ")", "." ]
train
https://github.com/malexer/s3io/blob/95188b150d2e02357843f9228efb7b1b605e09ba/s3io.py#L32-L45
malexer/s3io
s3io.py
open
def open(s3_url, mode='r', s3_connection=None, **kwargs): """Open S3 url, returning a File Object. S3 connection: 1. Can be specified directly by `s3_connection`. 2. `boto.connect_s3` will be used supplying all `kwargs`. - `aws_access_key_id` and `aws_secret_access_key`. - `profile_name` - recommended. See: http://boto.readthedocs.org/en/latest/boto_config_tut.html """ connection = s3_connection or boto.connect_s3(**kwargs) bucket_name, key_name = url_split(s3_url) try: bucket = connection.get_bucket(bucket_name) except boto.exception.S3ResponseError: raise BucketNotFoundError('Bucket "%s" was not found.' % bucket_name) f = NamedTemporaryFile() try: if 'w' in mode.lower(): s3_key = bucket.new_key(key_name) yield f f.seek(0) s3_key.set_contents_from_file(f) else: s3_key = bucket.get_key(key_name) if not s3_key: raise KeyNotFoundError('Key "%s" was not found.' % s3_url) s3_key.get_file(f) f.seek(0) yield f finally: f.close()
python
def open(s3_url, mode='r', s3_connection=None, **kwargs): """Open S3 url, returning a File Object. S3 connection: 1. Can be specified directly by `s3_connection`. 2. `boto.connect_s3` will be used supplying all `kwargs`. - `aws_access_key_id` and `aws_secret_access_key`. - `profile_name` - recommended. See: http://boto.readthedocs.org/en/latest/boto_config_tut.html """ connection = s3_connection or boto.connect_s3(**kwargs) bucket_name, key_name = url_split(s3_url) try: bucket = connection.get_bucket(bucket_name) except boto.exception.S3ResponseError: raise BucketNotFoundError('Bucket "%s" was not found.' % bucket_name) f = NamedTemporaryFile() try: if 'w' in mode.lower(): s3_key = bucket.new_key(key_name) yield f f.seek(0) s3_key.set_contents_from_file(f) else: s3_key = bucket.get_key(key_name) if not s3_key: raise KeyNotFoundError('Key "%s" was not found.' % s3_url) s3_key.get_file(f) f.seek(0) yield f finally: f.close()
[ "def", "open", "(", "s3_url", ",", "mode", "=", "'r'", ",", "s3_connection", "=", "None", ",", "*", "*", "kwargs", ")", ":", "connection", "=", "s3_connection", "or", "boto", ".", "connect_s3", "(", "*", "*", "kwargs", ")", "bucket_name", ",", "key_nam...
Open S3 url, returning a File Object. S3 connection: 1. Can be specified directly by `s3_connection`. 2. `boto.connect_s3` will be used supplying all `kwargs`. - `aws_access_key_id` and `aws_secret_access_key`. - `profile_name` - recommended. See: http://boto.readthedocs.org/en/latest/boto_config_tut.html
[ "Open", "S3", "url", "returning", "a", "File", "Object", "." ]
train
https://github.com/malexer/s3io/blob/95188b150d2e02357843f9228efb7b1b605e09ba/s3io.py#L49-L88
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/numeric.py
Numeric.is_decimal
def is_decimal(self): """Determine if a data record is of the type float.""" dt = DATA_TYPES['decimal'] if type(self.data) in dt['type']: self.type = 'DECIMAL' num_split = str(self.data).split('.', 1) self.len = len(num_split[0]) self.len_decimal = len(num_split[1]) return True
python
def is_decimal(self): """Determine if a data record is of the type float.""" dt = DATA_TYPES['decimal'] if type(self.data) in dt['type']: self.type = 'DECIMAL' num_split = str(self.data).split('.', 1) self.len = len(num_split[0]) self.len_decimal = len(num_split[1]) return True
[ "def", "is_decimal", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'decimal'", "]", "if", "type", "(", "self", ".", "data", ")", "in", "dt", "[", "'type'", "]", ":", "self", ".", "type", "=", "'DECIMAL'", "num_split", "=", "str", "(", "self...
Determine if a data record is of the type float.
[ "Determine", "if", "a", "data", "record", "is", "of", "the", "type", "float", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/numeric.py#L31-L39
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/numeric.py
Numeric._is_numeric_data
def _is_numeric_data(self, data_type): """Private method for testing text data types.""" dt = DATA_TYPES[data_type] if dt['min'] and dt['max']: if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']: self.type = data_type.upper() self.len = len(str(self.data)) return True
python
def _is_numeric_data(self, data_type): """Private method for testing text data types.""" dt = DATA_TYPES[data_type] if dt['min'] and dt['max']: if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']: self.type = data_type.upper() self.len = len(str(self.data)) return True
[ "def", "_is_numeric_data", "(", "self", ",", "data_type", ")", ":", "dt", "=", "DATA_TYPES", "[", "data_type", "]", "if", "dt", "[", "'min'", "]", "and", "dt", "[", "'max'", "]", ":", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", ...
Private method for testing text data types.
[ "Private", "method", "for", "testing", "text", "data", "types", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/numeric.py#L41-L48
PGower/PyCanvas
pycanvas/apis/enrollment_terms.py
EnrollmentTermsAPI.create_enrollment_term
def create_enrollment_term(self, account_id, enrollment_term_end_at=None, enrollment_term_name=None, enrollment_term_sis_term_id=None, enrollment_term_start_at=None): """ Create enrollment term. Create a new enrollment term for the specified account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - enrollment_term[name] """The name of the term.""" if enrollment_term_name is not None: data["enrollment_term[name]"] = enrollment_term_name # OPTIONAL - enrollment_term[start_at] """The day/time the term starts. Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z.""" if enrollment_term_start_at is not None: data["enrollment_term[start_at]"] = enrollment_term_start_at # OPTIONAL - enrollment_term[end_at] """The day/time the term ends. Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z.""" if enrollment_term_end_at is not None: data["enrollment_term[end_at]"] = enrollment_term_end_at # OPTIONAL - enrollment_term[sis_term_id] """The unique SIS identifier for the term.""" if enrollment_term_sis_term_id is not None: data["enrollment_term[sis_term_id]"] = enrollment_term_sis_term_id self.logger.debug("POST /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, single_item=True)
python
def create_enrollment_term(self, account_id, enrollment_term_end_at=None, enrollment_term_name=None, enrollment_term_sis_term_id=None, enrollment_term_start_at=None): """ Create enrollment term. Create a new enrollment term for the specified account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - enrollment_term[name] """The name of the term.""" if enrollment_term_name is not None: data["enrollment_term[name]"] = enrollment_term_name # OPTIONAL - enrollment_term[start_at] """The day/time the term starts. Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z.""" if enrollment_term_start_at is not None: data["enrollment_term[start_at]"] = enrollment_term_start_at # OPTIONAL - enrollment_term[end_at] """The day/time the term ends. Accepts times in ISO 8601 format, e.g. 2015-01-10T18:48:00Z.""" if enrollment_term_end_at is not None: data["enrollment_term[end_at]"] = enrollment_term_end_at # OPTIONAL - enrollment_term[sis_term_id] """The unique SIS identifier for the term.""" if enrollment_term_sis_term_id is not None: data["enrollment_term[sis_term_id]"] = enrollment_term_sis_term_id self.logger.debug("POST /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, single_item=True)
[ "def", "create_enrollment_term", "(", "self", ",", "account_id", ",", "enrollment_term_end_at", "=", "None", ",", "enrollment_term_name", "=", "None", ",", "enrollment_term_sis_term_id", "=", "None", ",", "enrollment_term_start_at", "=", "None", ")", ":", "path", "=...
Create enrollment term. Create a new enrollment term for the specified account.
[ "Create", "enrollment", "term", ".", "Create", "a", "new", "enrollment", "term", "for", "the", "specified", "account", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/enrollment_terms.py#L19-L56
PGower/PyCanvas
pycanvas/apis/enrollment_terms.py
EnrollmentTermsAPI.list_enrollment_terms
def list_enrollment_terms(self, account_id, workflow_state=None): """ List enrollment terms. Return all of the terms in the account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - workflow_state """If set, only returns terms that are in the given state. Defaults to 'active'.""" if workflow_state is not None: self._validate_enum(workflow_state, ["active", "deleted", "all"]) params["workflow_state"] = workflow_state self.logger.debug("GET /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, data_key='enrollment_terms', all_pages=True)
python
def list_enrollment_terms(self, account_id, workflow_state=None): """ List enrollment terms. Return all of the terms in the account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - workflow_state """If set, only returns terms that are in the given state. Defaults to 'active'.""" if workflow_state is not None: self._validate_enum(workflow_state, ["active", "deleted", "all"]) params["workflow_state"] = workflow_state self.logger.debug("GET /api/v1/accounts/{account_id}/terms with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/terms".format(**path), data=data, params=params, data_key='enrollment_terms', all_pages=True)
[ "def", "list_enrollment_terms", "(", "self", ",", "account_id", ",", "workflow_state", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_i...
List enrollment terms. Return all of the terms in the account.
[ "List", "enrollment", "terms", ".", "Return", "all", "of", "the", "terms", "in", "the", "account", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/enrollment_terms.py#L122-L144
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.from_model
def from_model(cls, model_instance, default_value=False, **kwargs): """ wrapper for Model's get_attribute_filter """ if not isinstance(model_instance, DataCollection): raise TypeError("model_instance must be a subclass of \ prestans.types.DataCollection, %s given" % (model_instance.__class__.__name__)) elif isinstance(model_instance, Array) and model_instance.is_scalar: return AttributeFilter(is_array_scalar=True) attribute_filter_instance = model_instance.get_attribute_filter(default_value) # kwargs support for name, value in iter(kwargs.items()): if name in attribute_filter_instance: setattr(attribute_filter_instance, name, value) else: raise KeyError(name) return attribute_filter_instance
python
def from_model(cls, model_instance, default_value=False, **kwargs): """ wrapper for Model's get_attribute_filter """ if not isinstance(model_instance, DataCollection): raise TypeError("model_instance must be a subclass of \ prestans.types.DataCollection, %s given" % (model_instance.__class__.__name__)) elif isinstance(model_instance, Array) and model_instance.is_scalar: return AttributeFilter(is_array_scalar=True) attribute_filter_instance = model_instance.get_attribute_filter(default_value) # kwargs support for name, value in iter(kwargs.items()): if name in attribute_filter_instance: setattr(attribute_filter_instance, name, value) else: raise KeyError(name) return attribute_filter_instance
[ "def", "from_model", "(", "cls", ",", "model_instance", ",", "default_value", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "model_instance", ",", "DataCollection", ")", ":", "raise", "TypeError", "(", "\"model_instance must ...
wrapper for Model's get_attribute_filter
[ "wrapper", "for", "Model", "s", "get_attribute_filter" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L40-L59
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.blueprint
def blueprint(self): """ :return: blueprint :rtype: dict """ blueprint = dict() for key in self.keys(): blueprint[key] = self.is_attribute_visible(key) return blueprint
python
def blueprint(self): """ :return: blueprint :rtype: dict """ blueprint = dict() for key in self.keys(): blueprint[key] = self.is_attribute_visible(key) return blueprint
[ "def", "blueprint", "(", "self", ")", ":", "blueprint", "=", "dict", "(", ")", "for", "key", "in", "self", ".", "keys", "(", ")", ":", "blueprint", "[", "key", "]", "=", "self", ".", "is_attribute_visible", "(", "key", ")", "return", "blueprint" ]
:return: blueprint :rtype: dict
[ ":", "return", ":", "blueprint", ":", "rtype", ":", "dict" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L61-L70
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.conforms_to_template_filter
def conforms_to_template_filter(self, template_filter): """ Check AttributeFilter conforms to the rules set by the template - If self, has attributes that template_filter does not contain, throw Exception - If sub list found, perform the first check - If self has a value for an attribute, assign to final AttributeFilter - If not found, assign value from template todo: rename as current name is mis-leading """ if not isinstance(template_filter, self.__class__): raise TypeError("AttributeFilter can only check conformance against \ another template filter, %s provided" % template_filter.__class__.__name__) # keys from the template template_filter_keys = template_filter.keys() # Keys from the object itself this_filter_keys = self.keys() # 1. Check to see if the client has provided unwanted keys unwanted_keys = set(this_filter_keys) - set(template_filter_keys) if len(unwanted_keys) > 0: raise exception.AttributeFilterDiffers(list(unwanted_keys)) # 2. Make a attribute_filter that we send back evaluated_attribute_filter = AttributeFilter() # 3. 
Evaluate the differences between the two, with template_filter as the standard for template_key in template_filter_keys: if template_key in this_filter_keys: value = getattr(self, template_key) # if sub filter and boolean provided with of true, create default filter with value of true if isinstance(value, bool) and \ value is True and \ isinstance(getattr(template_filter, template_key), AttributeFilter): setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key)) elif isinstance(value, bool): setattr(evaluated_attribute_filter, template_key, value) elif isinstance(value, self.__class__): # Attribute lists sort themselves out, to produce sub Attribute Filters template_sub_list = getattr(template_filter, template_key) this_sub_list = getattr(self, template_key) setattr( evaluated_attribute_filter, template_key, this_sub_list.conforms_to_template_filter(template_sub_list) ) else: setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key)) return evaluated_attribute_filter
python
def conforms_to_template_filter(self, template_filter): """ Check AttributeFilter conforms to the rules set by the template - If self, has attributes that template_filter does not contain, throw Exception - If sub list found, perform the first check - If self has a value for an attribute, assign to final AttributeFilter - If not found, assign value from template todo: rename as current name is mis-leading """ if not isinstance(template_filter, self.__class__): raise TypeError("AttributeFilter can only check conformance against \ another template filter, %s provided" % template_filter.__class__.__name__) # keys from the template template_filter_keys = template_filter.keys() # Keys from the object itself this_filter_keys = self.keys() # 1. Check to see if the client has provided unwanted keys unwanted_keys = set(this_filter_keys) - set(template_filter_keys) if len(unwanted_keys) > 0: raise exception.AttributeFilterDiffers(list(unwanted_keys)) # 2. Make a attribute_filter that we send back evaluated_attribute_filter = AttributeFilter() # 3. 
Evaluate the differences between the two, with template_filter as the standard for template_key in template_filter_keys: if template_key in this_filter_keys: value = getattr(self, template_key) # if sub filter and boolean provided with of true, create default filter with value of true if isinstance(value, bool) and \ value is True and \ isinstance(getattr(template_filter, template_key), AttributeFilter): setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key)) elif isinstance(value, bool): setattr(evaluated_attribute_filter, template_key, value) elif isinstance(value, self.__class__): # Attribute lists sort themselves out, to produce sub Attribute Filters template_sub_list = getattr(template_filter, template_key) this_sub_list = getattr(self, template_key) setattr( evaluated_attribute_filter, template_key, this_sub_list.conforms_to_template_filter(template_sub_list) ) else: setattr(evaluated_attribute_filter, template_key, getattr(template_filter, template_key)) return evaluated_attribute_filter
[ "def", "conforms_to_template_filter", "(", "self", ",", "template_filter", ")", ":", "if", "not", "isinstance", "(", "template_filter", ",", "self", ".", "__class__", ")", ":", "raise", "TypeError", "(", "\"AttributeFilter can only check conformance against \\\n ...
Check AttributeFilter conforms to the rules set by the template - If self, has attributes that template_filter does not contain, throw Exception - If sub list found, perform the first check - If self has a value for an attribute, assign to final AttributeFilter - If not found, assign value from template todo: rename as current name is mis-leading
[ "Check", "AttributeFilter", "conforms", "to", "the", "rules", "set", "by", "the", "template" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L72-L126
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.keys
def keys(self): """ :returns: a list of usable keys :rtype: list """ keys = list() for attribute_name, type_instance in inspect.getmembers(self): # ignore parameters with __ and if they are methods if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue keys.append(attribute_name) return keys
python
def keys(self): """ :returns: a list of usable keys :rtype: list """ keys = list() for attribute_name, type_instance in inspect.getmembers(self): # ignore parameters with __ and if they are methods if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue keys.append(attribute_name) return keys
[ "def", "keys", "(", "self", ")", ":", "keys", "=", "list", "(", ")", "for", "attribute_name", ",", "type_instance", "in", "inspect", ".", "getmembers", "(", "self", ")", ":", "# ignore parameters with __ and if they are methods", "if", "attribute_name", ".", "st...
:returns: a list of usable keys :rtype: list
[ ":", "returns", ":", "a", "list", "of", "usable", "keys", ":", "rtype", ":", "list" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L128-L146
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.is_filter_at_key
def is_filter_at_key(self, key): """ return True if attribute is a sub filter """ if key in self: attribute_status = getattr(self, key) if isinstance(attribute_status, self.__class__): return True return False
python
def is_filter_at_key(self, key): """ return True if attribute is a sub filter """ if key in self: attribute_status = getattr(self, key) if isinstance(attribute_status, self.__class__): return True return False
[ "def", "is_filter_at_key", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ":", "attribute_status", "=", "getattr", "(", "self", ",", "key", ")", "if", "isinstance", "(", "attribute_status", ",", "self", ".", "__class__", ")", ":", "return"...
return True if attribute is a sub filter
[ "return", "True", "if", "attribute", "is", "a", "sub", "filter" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L151-L161
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.is_attribute_visible
def is_attribute_visible(self, key): """ Returns True if an attribute is visible If attribute is an instance of AttributeFilter, it returns True if all attributes of the sub filter are visible. :param key: name of attribute to check :type key: str :return: whether attribute is visible :rtype: bool """ if key in self: attribute_status = getattr(self, key) if isinstance(attribute_status, bool) and attribute_status is True: return True elif isinstance(attribute_status, self.__class__) and attribute_status.are_any_attributes_visible(): return True return False
python
def is_attribute_visible(self, key): """ Returns True if an attribute is visible If attribute is an instance of AttributeFilter, it returns True if all attributes of the sub filter are visible. :param key: name of attribute to check :type key: str :return: whether attribute is visible :rtype: bool """ if key in self: attribute_status = getattr(self, key) if isinstance(attribute_status, bool) and attribute_status is True: return True elif isinstance(attribute_status, self.__class__) and attribute_status.are_any_attributes_visible(): return True return False
[ "def", "is_attribute_visible", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ":", "attribute_status", "=", "getattr", "(", "self", ",", "key", ")", "if", "isinstance", "(", "attribute_status", ",", "bool", ")", "and", "attribute_status", "i...
Returns True if an attribute is visible If attribute is an instance of AttributeFilter, it returns True if all attributes of the sub filter are visible. :param key: name of attribute to check :type key: str :return: whether attribute is visible :rtype: bool
[ "Returns", "True", "if", "an", "attribute", "is", "visible", "If", "attribute", "is", "an", "instance", "of", "AttributeFilter", "it", "returns", "True", "if", "all", "attributes", "of", "the", "sub", "filter", "are", "visible", "." ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L163-L181
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.are_any_attributes_visible
def are_any_attributes_visible(self): """ checks to see if any attributes are set to true """ for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue if isinstance(type_instance, bool) and type_instance is True: return True elif isinstance(type_instance, self.__class__) and type_instance.are_all_attributes_visible() is True: return True return False
python
def are_any_attributes_visible(self): """ checks to see if any attributes are set to true """ for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue if isinstance(type_instance, bool) and type_instance is True: return True elif isinstance(type_instance, self.__class__) and type_instance.are_all_attributes_visible() is True: return True return False
[ "def", "are_any_attributes_visible", "(", "self", ")", ":", "for", "attribute_name", ",", "type_instance", "in", "inspect", ".", "getmembers", "(", "self", ")", ":", "if", "attribute_name", ".", "startswith", "(", "'__'", ")", "or", "inspect", ".", "ismethod",...
checks to see if any attributes are set to true
[ "checks", "to", "see", "if", "any", "attributes", "are", "set", "to", "true" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L183-L198
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.set_all_attribute_values
def set_all_attribute_values(self, value): """ sets all the attribute values to the value and propagate to any children """ for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): # Ignore parameters with __ and if they are methods continue if isinstance(type_instance, bool): self.__dict__[attribute_name] = value elif isinstance(type_instance, self.__class__): type_instance.set_all_attribute_values(value)
python
def set_all_attribute_values(self, value): """ sets all the attribute values to the value and propagate to any children """ for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): # Ignore parameters with __ and if they are methods continue if isinstance(type_instance, bool): self.__dict__[attribute_name] = value elif isinstance(type_instance, self.__class__): type_instance.set_all_attribute_values(value)
[ "def", "set_all_attribute_values", "(", "self", ",", "value", ")", ":", "for", "attribute_name", ",", "type_instance", "in", "inspect", ".", "getmembers", "(", "self", ")", ":", "if", "attribute_name", ".", "startswith", "(", "'__'", ")", "or", "inspect", "....
sets all the attribute values to the value and propagate to any children
[ "sets", "all", "the", "attribute", "values", "to", "the", "value", "and", "propagate", "to", "any", "children" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L218-L232
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter.as_dict
def as_dict(self): """ turns attribute filter object into python dictionary """ output_dictionary = dict() for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue if isinstance(type_instance, bool): output_dictionary[attribute_name] = type_instance elif isinstance(type_instance, self.__class__): output_dictionary[attribute_name] = type_instance.as_dict() return output_dictionary
python
def as_dict(self): """ turns attribute filter object into python dictionary """ output_dictionary = dict() for attribute_name, type_instance in inspect.getmembers(self): if attribute_name.startswith('__') or inspect.ismethod(type_instance): continue if isinstance(type_instance, bool): output_dictionary[attribute_name] = type_instance elif isinstance(type_instance, self.__class__): output_dictionary[attribute_name] = type_instance.as_dict() return output_dictionary
[ "def", "as_dict", "(", "self", ")", ":", "output_dictionary", "=", "dict", "(", ")", "for", "attribute_name", ",", "type_instance", "in", "inspect", ".", "getmembers", "(", "self", ")", ":", "if", "attribute_name", ".", "startswith", "(", "'__'", ")", "or"...
turns attribute filter object into python dictionary
[ "turns", "attribute", "filter", "object", "into", "python", "dictionary" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L234-L251
anomaly/prestans
prestans/parser/attribute_filter.py
AttributeFilter._init_from_dictionary
def _init_from_dictionary(self, from_dictionary, template_model=None): """ Private helper to init values from a dictionary, wraps children into AttributeFilter objects :param from_dictionary: dictionary to get attribute names and visibility from :type from_dictionary: dict :param template_model: :type template_model: DataCollection """ if not isinstance(from_dictionary, dict): raise TypeError("from_dictionary must be of type dict, %s \ provided" % from_dictionary.__class__.__name__) rewrite_map = None if template_model is not None: if not isinstance(template_model, DataCollection): msg = "template_model should be a prestans model %s provided" % template_model.__class__.__name__ raise TypeError(msg) rewrite_map = template_model.attribute_rewrite_reverse_map() for key, value in iter(from_dictionary.items()): target_key = key # minify support if rewrite_map is not None: target_key = rewrite_map[key] # ensure that the key exists in the template model if template_model is not None and target_key not in template_model: unwanted_keys = list() unwanted_keys.append(target_key) raise exception.AttributeFilterDiffers(unwanted_keys) # check to see we can work with the value if not isinstance(value, (bool, dict)): raise TypeError("AttributeFilter input for key %s must be \ boolean or dict, %s provided" % (key, value.__class__.__name__)) # Either keep the value of wrap it up with AttributeFilter if isinstance(value, bool): setattr(self, target_key, value) elif isinstance(value, dict): sub_map = None if template_model is not None: sub_map = getattr(template_model, target_key) # prestans Array support if isinstance(sub_map, Array): sub_map = sub_map.element_template setattr(self, target_key, AttributeFilter(from_dictionary=value, template_model=sub_map))
python
def _init_from_dictionary(self, from_dictionary, template_model=None): """ Private helper to init values from a dictionary, wraps children into AttributeFilter objects :param from_dictionary: dictionary to get attribute names and visibility from :type from_dictionary: dict :param template_model: :type template_model: DataCollection """ if not isinstance(from_dictionary, dict): raise TypeError("from_dictionary must be of type dict, %s \ provided" % from_dictionary.__class__.__name__) rewrite_map = None if template_model is not None: if not isinstance(template_model, DataCollection): msg = "template_model should be a prestans model %s provided" % template_model.__class__.__name__ raise TypeError(msg) rewrite_map = template_model.attribute_rewrite_reverse_map() for key, value in iter(from_dictionary.items()): target_key = key # minify support if rewrite_map is not None: target_key = rewrite_map[key] # ensure that the key exists in the template model if template_model is not None and target_key not in template_model: unwanted_keys = list() unwanted_keys.append(target_key) raise exception.AttributeFilterDiffers(unwanted_keys) # check to see we can work with the value if not isinstance(value, (bool, dict)): raise TypeError("AttributeFilter input for key %s must be \ boolean or dict, %s provided" % (key, value.__class__.__name__)) # Either keep the value of wrap it up with AttributeFilter if isinstance(value, bool): setattr(self, target_key, value) elif isinstance(value, dict): sub_map = None if template_model is not None: sub_map = getattr(template_model, target_key) # prestans Array support if isinstance(sub_map, Array): sub_map = sub_map.element_template setattr(self, target_key, AttributeFilter(from_dictionary=value, template_model=sub_map))
[ "def", "_init_from_dictionary", "(", "self", ",", "from_dictionary", ",", "template_model", "=", "None", ")", ":", "if", "not", "isinstance", "(", "from_dictionary", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"from_dictionary must be of type dict, %s \\\n ...
Private helper to init values from a dictionary, wraps children into AttributeFilter objects :param from_dictionary: dictionary to get attribute names and visibility from :type from_dictionary: dict :param template_model: :type template_model: DataCollection
[ "Private", "helper", "to", "init", "values", "from", "a", "dictionary", "wraps", "children", "into", "AttributeFilter", "objects" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/attribute_filter.py#L253-L311
bioasp/caspo
caspo/predict.py
Predictor.predict
def predict(self): """ Computes all possible weighted average predictions and their variances Example:: >>> from caspo import core, predict >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> predictor = predict.Predictor(networks, setup) >>> df = predictor.predict() >>> df.to_csv('predictions.csv'), index=False) Returns -------- `pandas.DataFrame`_ DataFrame with the weighted average predictions and variance of all readouts for each possible clamping .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ self._logger.info("Computing all predictions and their variance for %s logical networks...", len(self.networks)) return self.networks.predictions(self.setup.filter(self.networks))
python
def predict(self): """ Computes all possible weighted average predictions and their variances Example:: >>> from caspo import core, predict >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> predictor = predict.Predictor(networks, setup) >>> df = predictor.predict() >>> df.to_csv('predictions.csv'), index=False) Returns -------- `pandas.DataFrame`_ DataFrame with the weighted average predictions and variance of all readouts for each possible clamping .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe """ self._logger.info("Computing all predictions and their variance for %s logical networks...", len(self.networks)) return self.networks.predictions(self.setup.filter(self.networks))
[ "def", "predict", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"Computing all predictions and their variance for %s logical networks...\"", ",", "len", "(", "self", ".", "networks", ")", ")", "return", "self", ".", "networks", ".", "predictio...
Computes all possible weighted average predictions and their variances Example:: >>> from caspo import core, predict >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> predictor = predict.Predictor(networks, setup) >>> df = predictor.predict() >>> df.to_csv('predictions.csv'), index=False) Returns -------- `pandas.DataFrame`_ DataFrame with the weighted average predictions and variance of all readouts for each possible clamping .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
[ "Computes", "all", "possible", "weighted", "average", "predictions", "and", "their", "variances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/predict.py#L52-L79
shmir/PyIxNetwork
ixnetwork/ixn_root.py
IxnRoot.get_traffic_items
def get_traffic_items(self): """ :return: dictionary {name: object} of all traffic items. """ traffic = self.get_child_static('traffic') return {o.obj_name(): o for o in traffic.get_objects_or_children_by_type('trafficItem')}
python
def get_traffic_items(self): """ :return: dictionary {name: object} of all traffic items. """ traffic = self.get_child_static('traffic') return {o.obj_name(): o for o in traffic.get_objects_or_children_by_type('trafficItem')}
[ "def", "get_traffic_items", "(", "self", ")", ":", "traffic", "=", "self", ".", "get_child_static", "(", "'traffic'", ")", "return", "{", "o", ".", "obj_name", "(", ")", ":", "o", "for", "o", "in", "traffic", ".", "get_objects_or_children_by_type", "(", "'...
:return: dictionary {name: object} of all traffic items.
[ ":", "return", ":", "dictionary", "{", "name", ":", "object", "}", "of", "all", "traffic", "items", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_root.py#L32-L38
Stranger6667/pyoffers
pyoffers/models/raw_log.py
LogFile.content
def content(self): """ Returns raw CSV content of the log file. """ raw_content = self._manager.api.session.get(self.download_link).content data = BytesIO(raw_content) archive = ZipFile(data) filename = archive.filelist[0] # Always 1 file in the archive return archive.read(filename)
python
def content(self): """ Returns raw CSV content of the log file. """ raw_content = self._manager.api.session.get(self.download_link).content data = BytesIO(raw_content) archive = ZipFile(data) filename = archive.filelist[0] # Always 1 file in the archive return archive.read(filename)
[ "def", "content", "(", "self", ")", ":", "raw_content", "=", "self", ".", "_manager", ".", "api", ".", "session", ".", "get", "(", "self", ".", "download_link", ")", ".", "content", "data", "=", "BytesIO", "(", "raw_content", ")", "archive", "=", "ZipF...
Returns raw CSV content of the log file.
[ "Returns", "raw", "CSV", "content", "of", "the", "log", "file", "." ]
train
https://github.com/Stranger6667/pyoffers/blob/9575d6cdc878096242268311a22cc5fdd4f64b37/pyoffers/models/raw_log.py#L44-L52
karel-brinda/rnftools
rnftools/rnfformat/Validator.py
Validator.validate
def validate(self, read_tuple_name): """Check RNF validity of a read tuple. Args: read_tuple_name (str): Read tuple name to be checked.s """ if reg_lrn.match(read_tuple_name) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_read_tuple_name_structure", message="'{}' is not matched".format(reg_lrn), ) else: parts = read_tuple_name.split("__") if reg_prefix_part.match(parts[0]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_prefix_part", message="'{}' is not matched".format(reg_prefix_part), ) if reg_id_part.match(parts[1]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_id_part", message="'{}' is not matched".format(reg_id_part), ) if reg_segmental_part.match(parts[2]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_segmental_part", message="'{}' is not matched".format(reg_segmental_part), ) if reg_suffix_part.match(parts[3]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_suffix_part", message="'{}' is not matched".format(reg_suffix_part), ) if not self.rnf_profile.check(read_tuple_name): self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_profile", message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format( self.rnf_profile, rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name), ), warning=True, )
python
def validate(self, read_tuple_name): """Check RNF validity of a read tuple. Args: read_tuple_name (str): Read tuple name to be checked.s """ if reg_lrn.match(read_tuple_name) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_read_tuple_name_structure", message="'{}' is not matched".format(reg_lrn), ) else: parts = read_tuple_name.split("__") if reg_prefix_part.match(parts[0]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_prefix_part", message="'{}' is not matched".format(reg_prefix_part), ) if reg_id_part.match(parts[1]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_id_part", message="'{}' is not matched".format(reg_id_part), ) if reg_segmental_part.match(parts[2]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_segmental_part", message="'{}' is not matched".format(reg_segmental_part), ) if reg_suffix_part.match(parts[3]) is None: self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_suffix_part", message="'{}' is not matched".format(reg_suffix_part), ) if not self.rnf_profile.check(read_tuple_name): self.report_error( read_tuple_name=read_tuple_name, error_name="wrong_profile", message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format( self.rnf_profile, rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name), ), warning=True, )
[ "def", "validate", "(", "self", ",", "read_tuple_name", ")", ":", "if", "reg_lrn", ".", "match", "(", "read_tuple_name", ")", "is", "None", ":", "self", ".", "report_error", "(", "read_tuple_name", "=", "read_tuple_name", ",", "error_name", "=", "\"wrong_read_...
Check RNF validity of a read tuple. Args: read_tuple_name (str): Read tuple name to be checked.s
[ "Check", "RNF", "validity", "of", "a", "read", "tuple", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/Validator.py#L37-L89
karel-brinda/rnftools
rnftools/rnfformat/Validator.py
Validator.report_error
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False): """Report an error. Args: read_tuple_name (): Name of the read tuple. error_name (): Name of the error. wrong (str): What is wrong. message (str): Additional msessage to be printed. warning (bool): Warning (not an error). """ if (not self.report_only_first) or (error_name not in self.reported_errors): print("\t".join(["error" if warning == False else "warning", read_tuple_name, error_name, wrong, message])) self.reported_errors.add(error_name) if warning: self.warning_has_been_reported = True else: self.error_has_been_reported = True
python
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False): """Report an error. Args: read_tuple_name (): Name of the read tuple. error_name (): Name of the error. wrong (str): What is wrong. message (str): Additional msessage to be printed. warning (bool): Warning (not an error). """ if (not self.report_only_first) or (error_name not in self.reported_errors): print("\t".join(["error" if warning == False else "warning", read_tuple_name, error_name, wrong, message])) self.reported_errors.add(error_name) if warning: self.warning_has_been_reported = True else: self.error_has_been_reported = True
[ "def", "report_error", "(", "self", ",", "read_tuple_name", ",", "error_name", ",", "wrong", "=", "\"\"", ",", "message", "=", "\"\"", ",", "warning", "=", "False", ")", ":", "if", "(", "not", "self", ".", "report_only_first", ")", "or", "(", "error_name...
Report an error. Args: read_tuple_name (): Name of the read tuple. error_name (): Name of the error. wrong (str): What is wrong. message (str): Additional msessage to be printed. warning (bool): Warning (not an error).
[ "Report", "an", "error", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/rnfformat/Validator.py#L99-L115
TurboGears/backlash
backlash/tracing/slowrequests/middleware.py
TraceSlowRequestsMiddleware._is_exempt
def _is_exempt(self, environ): """ Returns True if this request's URL starts with one of the excluded paths. """ exemptions = self.exclude_paths if exemptions: path = environ.get('PATH_INFO') for excluded_p in self.exclude_paths: if path.startswith(excluded_p): return True return False
python
def _is_exempt(self, environ): """ Returns True if this request's URL starts with one of the excluded paths. """ exemptions = self.exclude_paths if exemptions: path = environ.get('PATH_INFO') for excluded_p in self.exclude_paths: if path.startswith(excluded_p): return True return False
[ "def", "_is_exempt", "(", "self", ",", "environ", ")", ":", "exemptions", "=", "self", ".", "exclude_paths", "if", "exemptions", ":", "path", "=", "environ", ".", "get", "(", "'PATH_INFO'", ")", "for", "excluded_p", "in", "self", ".", "exclude_paths", ":",...
Returns True if this request's URL starts with one of the excluded paths.
[ "Returns", "True", "if", "this", "request", "s", "URL", "starts", "with", "one", "of", "the", "excluded", "paths", "." ]
train
https://github.com/TurboGears/backlash/blob/b8c73a6c8a203843f5a52c43b858ae5907fb2a4f/backlash/tracing/slowrequests/middleware.py#L77-L90
sehir-bioinformatics-database-lab/metabolitics
metabolitics/utils/io_utils.py
load_network_model
def load_network_model(model): ''' Loads metabolic network models in metabolitics. :param str model: model name ''' if type(model) == str: if model in ['ecoli', 'textbook', 'salmonella']: return cb.test.create_test_model(model) elif model == 'recon2': return cb.io.load_json_model('%s/network_models/%s.json' % (DATASET_PATH, model)) if type(model) == cb.Model: return model
python
def load_network_model(model): ''' Loads metabolic network models in metabolitics. :param str model: model name ''' if type(model) == str: if model in ['ecoli', 'textbook', 'salmonella']: return cb.test.create_test_model(model) elif model == 'recon2': return cb.io.load_json_model('%s/network_models/%s.json' % (DATASET_PATH, model)) if type(model) == cb.Model: return model
[ "def", "load_network_model", "(", "model", ")", ":", "if", "type", "(", "model", ")", "==", "str", ":", "if", "model", "in", "[", "'ecoli'", ",", "'textbook'", ",", "'salmonella'", "]", ":", "return", "cb", ".", "test", ".", "create_test_model", "(", "...
Loads metabolic network models in metabolitics. :param str model: model name
[ "Loads", "metabolic", "network", "models", "in", "metabolitics", "." ]
train
https://github.com/sehir-bioinformatics-database-lab/metabolitics/blob/a3aa34e82ad2d9641d9eaadba7ef619d56035012/metabolitics/utils/io_utils.py#L11-L24
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.get_expired
def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age)
python
def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age)
[ "def", "get_expired", "(", "self", ")", ":", "max_age", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "seconds", "=", "app_settings", ".", "ESI_TOKEN_VALID_DURATION", ")", "return", "self", ".", "filter", "(", "created__lte", "=", "max_age", ...
Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset`
[ "Get", "all", "tokens", "which", "have", "expired", ".", ":", "return", ":", "All", "expired", "tokens", ".", ":", "rtype", ":", ":", "class", ":", "esi", ".", "managers", ".", "TokenQueryset" ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L29-L36
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.bulk_refresh
def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete)
python
def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete)
[ "def", "bulk_refresh", "(", "self", ")", ":", "session", "=", "OAuth2Session", "(", "app_settings", ".", "ESI_SSO_CLIENT_ID", ")", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "app_settings", ".", "ESI_SSO_CLIENT_ID", ",", "app_settings", "."...
Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons.
[ "Refreshes", "all", "refreshable", "tokens", "in", "the", "queryset", ".", "Deletes", "any", "tokens", "which", "fail", "to", "refresh", ".", "Deletes", "any", "tokens", "which", "are", "expired", "and", "cannot", "refresh", ".", "Excludes", "tokens", "for", ...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L38-L58
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_valid
def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid
python
def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid
[ "def", "require_valid", "(", "self", ")", ":", "expired", "=", "self", ".", "get_expired", "(", ")", "valid", "=", "self", ".", "exclude", "(", "pk__in", "=", "expired", ")", "valid_expired", "=", "expired", ".", "bulk_refresh", "(", ")", "return", "vali...
Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset`
[ "Ensures", "all", "tokens", "are", "still", "valid", ".", "If", "expired", "attempts", "to", "refresh", ".", "Deletes", "those", "which", "fail", "to", "refresh", "or", "cannot", "be", "refreshed", ".", ":", "return", ":", "All", "tokens", "which", "are", ...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L60-L70
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_scopes
def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens
python
def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens
[ "def", "require_scopes", "(", "self", ",", "scope_string", ")", ":", "scopes", "=", "_process_scopes", "(", "scope_string", ")", "if", "not", "scopes", ":", "# asking for tokens with no scopes", "return", "self", ".", "filter", "(", "scopes__isnull", "=", "True", ...
:param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset`
[ ":", "param", "scope_string", ":", "The", "required", "scopes", ".", ":", "type", "scope_string", ":", "Union", "[", "str", "list", "]", ":", "return", ":", "The", "tokens", "with", "all", "requested", "scopes", ".", ":", "rtype", ":", ":", "class", ":...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L72-L91
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_scopes_exact
def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks)
python
def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks)
[ "def", "require_scopes_exact", "(", "self", ",", "scope_string", ")", ":", "num_scopes", "=", "len", "(", "_process_scopes", "(", "scope_string", ")", ")", "pks", "=", "[", "v", "[", "'pk'", "]", "for", "v", "in", "self", ".", "annotate", "(", "models", ...
:param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset`
[ ":", "param", "scope_string", ":", "The", "required", "scopes", ".", ":", "type", "scope_string", ":", "Union", "[", "str", "list", "]", ":", "return", ":", "The", "tokens", "with", "only", "the", "requested", "scopes", ".", ":", "rtype", ":", ":", "cl...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L93-L103
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.equivalent_to
def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
python
def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
[ "def", "equivalent_to", "(", "self", ",", "token", ")", ":", "return", "self", ".", "filter", "(", "character_id", "=", "token", ".", "character_id", ")", ".", "require_scopes_exact", "(", "token", ".", "scopes", ".", "all", "(", ")", ")", ".", "filter",...
Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset`
[ "Gets", "all", "tokens", "which", "match", "the", "character", "and", "scopes", "of", "a", "reference", "token", ":", "param", "token", ":", ":", "class", ":", "esi", ".", "models", ".", "Token", ":", "return", ":", ":", "class", ":", "esi", ".", "ma...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L105-L112
Adarnof/adarnauth-esi
esi/managers.py
TokenManager.create_from_code
def create_from_code(self, code, user=None): """ Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token` """ # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. 
Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
python
def create_from_code(self, code, user=None): """ Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token` """ # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. 
Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
[ "def", "create_from_code", "(", "self", ",", "code", ",", "user", "=", "None", ")", ":", "# perform code exchange", "logger", ".", "debug", "(", "\"Creating new token from code {0}\"", ".", "format", "(", "code", "[", ":", "-", "5", "]", ")", ")", "oauth", ...
Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token`
[ "Perform", "OAuth", "code", "exchange", "to", "retrieve", "a", "token", ".", ":", "param", "code", ":", "OAuth", "grant", "code", ".", ":", "param", "user", ":", "User", "who", "will", "own", "token", ".", ":", "return", ":", ":", "class", ":", "esi"...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L123-L189
Adarnof/adarnauth-esi
esi/managers.py
TokenManager.create_from_request
def create_from_request(self, request): """ Generate a token from the OAuth callback request. Must contain 'code' in GET. :param request: OAuth callback request. :return: :class:`esi.models.Token` """ logger.debug("Creating new token for {0} session {1}".format(request.user, request.session.session_key[:5])) code = request.GET.get('code') # attach a user during creation for some functionality in a post_save created receiver I'm working on elsewhere model = self.create_from_code(code, user=request.user if request.user.is_authenticated else None) return model
python
def create_from_request(self, request): """ Generate a token from the OAuth callback request. Must contain 'code' in GET. :param request: OAuth callback request. :return: :class:`esi.models.Token` """ logger.debug("Creating new token for {0} session {1}".format(request.user, request.session.session_key[:5])) code = request.GET.get('code') # attach a user during creation for some functionality in a post_save created receiver I'm working on elsewhere model = self.create_from_code(code, user=request.user if request.user.is_authenticated else None) return model
[ "def", "create_from_request", "(", "self", ",", "request", ")", ":", "logger", ".", "debug", "(", "\"Creating new token for {0} session {1}\"", ".", "format", "(", "request", ".", "user", ",", "request", ".", "session", ".", "session_key", "[", ":", "5", "]", ...
Generate a token from the OAuth callback request. Must contain 'code' in GET. :param request: OAuth callback request. :return: :class:`esi.models.Token`
[ "Generate", "a", "token", "from", "the", "OAuth", "callback", "request", ".", "Must", "contain", "code", "in", "GET", ".", ":", "param", "request", ":", "OAuth", "callback", "request", ".", ":", "return", ":", ":", "class", ":", "esi", ".", "models", "...
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L191-L201
litl/park
park.py
ibatch
def ibatch(iterable, size): """Yield a series of batches from iterable, each size elements long.""" source = iter(iterable) while True: batch = itertools.islice(source, size) yield itertools.chain([next(batch)], batch)
python
def ibatch(iterable, size): """Yield a series of batches from iterable, each size elements long.""" source = iter(iterable) while True: batch = itertools.islice(source, size) yield itertools.chain([next(batch)], batch)
[ "def", "ibatch", "(", "iterable", ",", "size", ")", ":", "source", "=", "iter", "(", "iterable", ")", "while", "True", ":", "batch", "=", "itertools", ".", "islice", "(", "source", ",", "size", ")", "yield", "itertools", ".", "chain", "(", "[", "next...
Yield a series of batches from iterable, each size elements long.
[ "Yield", "a", "series", "of", "batches", "from", "iterable", "each", "size", "elements", "long", "." ]
train
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L210-L215
litl/park
park.py
KVStore.put_many
def put_many(self, items): # pragma: no cover """Put many key-value pairs. This method may take advantage of performance or atomicity features of the underlying store. It does not guarantee that all items will be set in the same transaction, only that transactions may be used for performance. :param items: An iterable producing (key, value) tuples. """ for key, value in items: self.put(key, value)
python
def put_many(self, items): # pragma: no cover """Put many key-value pairs. This method may take advantage of performance or atomicity features of the underlying store. It does not guarantee that all items will be set in the same transaction, only that transactions may be used for performance. :param items: An iterable producing (key, value) tuples. """ for key, value in items: self.put(key, value)
[ "def", "put_many", "(", "self", ",", "items", ")", ":", "# pragma: no cover", "for", "key", ",", "value", "in", "items", ":", "self", ".", "put", "(", "key", ",", "value", ")" ]
Put many key-value pairs. This method may take advantage of performance or atomicity features of the underlying store. It does not guarantee that all items will be set in the same transaction, only that transactions may be used for performance. :param items: An iterable producing (key, value) tuples.
[ "Put", "many", "key", "-", "value", "pairs", "." ]
train
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L97-L109
litl/park
park.py
KVStore.prefix_items
def prefix_items(self, prefix, strip_prefix=False): """Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``. """ items = self.items(key_from=prefix) start = 0 if strip_prefix: start = len(prefix) for key, value in items: if not key.startswith(prefix): break yield key[start:], value
python
def prefix_items(self, prefix, strip_prefix=False): """Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``. """ items = self.items(key_from=prefix) start = 0 if strip_prefix: start = len(prefix) for key, value in items: if not key.startswith(prefix): break yield key[start:], value
[ "def", "prefix_items", "(", "self", ",", "prefix", ",", "strip_prefix", "=", "False", ")", ":", "items", "=", "self", ".", "items", "(", "key_from", "=", "prefix", ")", "start", "=", "0", "if", "strip_prefix", ":", "start", "=", "len", "(", "prefix", ...
Get all (key, value) pairs with keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All (key, value) pairs in the store where the keys begin with the ``prefix``.
[ "Get", "all", "(", "key", "value", ")", "pairs", "with", "keys", "that", "begin", "with", "prefix", "." ]
train
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L162-L184
litl/park
park.py
KVStore.prefix_keys
def prefix_keys(self, prefix, strip_prefix=False): """Get all keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All keys in the store that begin with ``prefix``. """ keys = self.keys(key_from=prefix) start = 0 if strip_prefix: start = len(prefix) for key in keys: if not key.startswith(prefix): break yield key[start:]
python
def prefix_keys(self, prefix, strip_prefix=False): """Get all keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All keys in the store that begin with ``prefix``. """ keys = self.keys(key_from=prefix) start = 0 if strip_prefix: start = len(prefix) for key in keys: if not key.startswith(prefix): break yield key[start:]
[ "def", "prefix_keys", "(", "self", ",", "prefix", ",", "strip_prefix", "=", "False", ")", ":", "keys", "=", "self", ".", "keys", "(", "key_from", "=", "prefix", ")", "start", "=", "0", "if", "strip_prefix", ":", "start", "=", "len", "(", "prefix", ")...
Get all keys that begin with ``prefix``. :param prefix: Lexical prefix for keys to search. :type prefix: bytes :param strip_prefix: True to strip the prefix from yielded items. :type strip_prefix: bool :yields: All keys in the store that begin with ``prefix``.
[ "Get", "all", "keys", "that", "begin", "with", "prefix", "." ]
train
https://github.com/litl/park/blob/85738418b3c1db57046a5b2f217ee3f5d55851df/park.py#L186-L207
PGower/PyCanvas
pycanvas/apis/gradebook_history.py
GradebookHistoryAPI.details_for_given_date_in_gradebook_history_for_this_course
def details_for_given_date_in_gradebook_history_for_this_course(self, date, course_id): """ Details for a given date in gradebook history for this course. Returns the graders who worked on this day, along with the assignments they worked on. More details can be obtained by selecting a grader and assignment and calling the 'submissions' api endpoint for a given date. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # REQUIRED - PATH - date """The date for which you would like to see detailed information""" path["date"] = date self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}".format(**path), data=data, params=params, all_pages=True)
python
def details_for_given_date_in_gradebook_history_for_this_course(self, date, course_id): """ Details for a given date in gradebook history for this course. Returns the graders who worked on this day, along with the assignments they worked on. More details can be obtained by selecting a grader and assignment and calling the 'submissions' api endpoint for a given date. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # REQUIRED - PATH - date """The date for which you would like to see detailed information""" path["date"] = date self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}".format(**path), data=data, params=params, all_pages=True)
[ "def", "details_for_given_date_in_gradebook_history_for_this_course", "(", "self", ",", "date", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"The id of the contextual course f...
Details for a given date in gradebook history for this course. Returns the graders who worked on this day, along with the assignments they worked on. More details can be obtained by selecting a grader and assignment and calling the 'submissions' api endpoint for a given date.
[ "Details", "for", "a", "given", "date", "in", "gradebook", "history", "for", "this", "course", ".", "Returns", "the", "graders", "who", "worked", "on", "this", "day", "along", "with", "the", "assignments", "they", "worked", "on", ".", "More", "details", "c...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/gradebook_history.py#L36-L57
PGower/PyCanvas
pycanvas/apis/gradebook_history.py
GradebookHistoryAPI.lists_submissions
def lists_submissions(self, date, course_id, grader_id, assignment_id): """ Lists submissions. Gives a nested list of submission versions """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # REQUIRED - PATH - date """The date for which you would like to see submissions""" path["date"] = date # REQUIRED - PATH - grader_id """The ID of the grader for which you want to see submissions""" path["grader_id"] = grader_id # REQUIRED - PATH - assignment_id """The ID of the assignment for which you want to see submissions""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
python
def lists_submissions(self, date, course_id, grader_id, assignment_id): """ Lists submissions. Gives a nested list of submission versions """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # REQUIRED - PATH - date """The date for which you would like to see submissions""" path["date"] = date # REQUIRED - PATH - grader_id """The ID of the grader for which you want to see submissions""" path["grader_id"] = grader_id # REQUIRED - PATH - assignment_id """The ID of the assignment for which you want to see submissions""" path["assignment_id"] = assignment_id self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/{date}/graders/{grader_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
[ "def", "lists_submissions", "(", "self", ",", "date", ",", "course_id", ",", "grader_id", ",", "assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"The id of the contextual co...
Lists submissions. Gives a nested list of submission versions
[ "Lists", "submissions", ".", "Gives", "a", "nested", "list", "of", "submission", "versions" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/gradebook_history.py#L59-L86
PGower/PyCanvas
pycanvas/apis/gradebook_history.py
GradebookHistoryAPI.list_uncollated_submission_versions
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None): """ List uncollated submission versions. Gives a paginated, uncollated list of submission versions for all matching submissions in the context. This SubmissionVersion objects will not include the +new_grade+ or +previous_grade+ keys, only the +grade+; same for +graded_at+ and +grader+. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # OPTIONAL - assignment_id """The ID of the assignment for which you want to see submissions. If absent, versions of submissions from any assignment in the course are included.""" if assignment_id is not None: params["assignment_id"] = assignment_id # OPTIONAL - user_id """The ID of the user for which you want to see submissions. If absent, versions of submissions from any user in the course are included.""" if user_id is not None: params["user_id"] = user_id # OPTIONAL - ascending """Returns submission versions in ascending date order (oldest first). If absent, returns submission versions in descending date order (newest first).""" if ascending is not None: params["ascending"] = ascending self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True)
python
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None): """ List uncollated submission versions. Gives a paginated, uncollated list of submission versions for all matching submissions in the context. This SubmissionVersion objects will not include the +new_grade+ or +previous_grade+ keys, only the +grade+; same for +graded_at+ and +grader+. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """The id of the contextual course for this API call""" path["course_id"] = course_id # OPTIONAL - assignment_id """The ID of the assignment for which you want to see submissions. If absent, versions of submissions from any assignment in the course are included.""" if assignment_id is not None: params["assignment_id"] = assignment_id # OPTIONAL - user_id """The ID of the user for which you want to see submissions. If absent, versions of submissions from any user in the course are included.""" if user_id is not None: params["user_id"] = user_id # OPTIONAL - ascending """Returns submission versions in ascending date order (oldest first). If absent, returns submission versions in descending date order (newest first).""" if ascending is not None: params["ascending"] = ascending self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_uncollated_submission_versions", "(", "self", ",", "course_id", ",", "ascending", "=", "None", ",", "assignment_id", "=", "None", ",", "user_id", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", ...
List uncollated submission versions. Gives a paginated, uncollated list of submission versions for all matching submissions in the context. This SubmissionVersion objects will not include the +new_grade+ or +previous_grade+ keys, only the +grade+; same for +graded_at+ and +grader+.
[ "List", "uncollated", "submission", "versions", ".", "Gives", "a", "paginated", "uncollated", "list", "of", "submission", "versions", "for", "all", "matching", "submissions", "in", "the", "context", ".", "This", "SubmissionVersion", "objects", "will", "not", "incl...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/gradebook_history.py#L88-L126
theonion/django-bulbs
bulbs/sections/models.py
Section.save
def save(self, *args, **kwargs): """Saving ensures that the slug, if not set, is set to the slugified name.""" if not self.slug: self.slug = slugify(self.name) section = super(Section, self).save(*args, **kwargs) if self.query and self.query != {}: self._save_percolator() return section
python
def save(self, *args, **kwargs): """Saving ensures that the slug, if not set, is set to the slugified name.""" if not self.slug: self.slug = slugify(self.name) section = super(Section, self).save(*args, **kwargs) if self.query and self.query != {}: self._save_percolator() return section
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "slug", ":", "self", ".", "slug", "=", "slugify", "(", "self", ".", "name", ")", "section", "=", "super", "(", "Section", ",", "self", ")...
Saving ensures that the slug, if not set, is set to the slugified name.
[ "Saving", "ensures", "that", "the", "slug", "if", "not", "set", "is", "set", "to", "the", "slugified", "name", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/sections/models.py#L44-L55
theonion/django-bulbs
bulbs/sections/models.py
Section._save_percolator
def _save_percolator(self): """saves the query field as an elasticsearch percolator """ index = Content.search_objects.mapping.index query_filter = self.get_content().to_dict() q = {} if "query" in query_filter: q = {"query": query_filter.get("query", {})} else: return es.index( index=index, doc_type=".percolator", body=q, id=self.es_id )
python
def _save_percolator(self): """saves the query field as an elasticsearch percolator """ index = Content.search_objects.mapping.index query_filter = self.get_content().to_dict() q = {} if "query" in query_filter: q = {"query": query_filter.get("query", {})} else: return es.index( index=index, doc_type=".percolator", body=q, id=self.es_id )
[ "def", "_save_percolator", "(", "self", ")", ":", "index", "=", "Content", ".", "search_objects", ".", "mapping", ".", "index", "query_filter", "=", "self", ".", "get_content", "(", ")", ".", "to_dict", "(", ")", "q", "=", "{", "}", "if", "\"query\"", ...
saves the query field as an elasticsearch percolator
[ "saves", "the", "query", "field", "as", "an", "elasticsearch", "percolator" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/sections/models.py#L57-L75
theonion/django-bulbs
bulbs/sections/models.py
Section.get_content
def get_content(self): """performs es search and gets content objects """ if "query" in self.query: q = self.query["query"] else: q = self.query search = custom_search_model(Content, q, field_map={ "feature-type": "feature_type.slug", "tag": "tags.slug", "content-type": "_type", }) return search
python
def get_content(self): """performs es search and gets content objects """ if "query" in self.query: q = self.query["query"] else: q = self.query search = custom_search_model(Content, q, field_map={ "feature-type": "feature_type.slug", "tag": "tags.slug", "content-type": "_type", }) return search
[ "def", "get_content", "(", "self", ")", ":", "if", "\"query\"", "in", "self", ".", "query", ":", "q", "=", "self", ".", "query", "[", "\"query\"", "]", "else", ":", "q", "=", "self", ".", "query", "search", "=", "custom_search_model", "(", "Content", ...
performs es search and gets content objects
[ "performs", "es", "search", "and", "gets", "content", "objects" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/sections/models.py#L81-L93
karel-brinda/rnftools
rnftools/mishmash/__init__.py
sample
def sample(name, reads_in_tuple): """ Create a new sample. """ if name in [sample_x.get_name() for sample_x in __SAMPLES__]: rnftools.utils.error( "Multiple samples have the same name. Each sample must have a unique name.", program="RNFtools", subprogram="MIShmash", exception=ValueError, ) Sample( name=name, reads_in_tuple=reads_in_tuple, ) add_input(current_sample().fq_fns())
python
def sample(name, reads_in_tuple): """ Create a new sample. """ if name in [sample_x.get_name() for sample_x in __SAMPLES__]: rnftools.utils.error( "Multiple samples have the same name. Each sample must have a unique name.", program="RNFtools", subprogram="MIShmash", exception=ValueError, ) Sample( name=name, reads_in_tuple=reads_in_tuple, ) add_input(current_sample().fq_fns())
[ "def", "sample", "(", "name", ",", "reads_in_tuple", ")", ":", "if", "name", "in", "[", "sample_x", ".", "get_name", "(", ")", "for", "sample_x", "in", "__SAMPLES__", "]", ":", "rnftools", ".", "utils", ".", "error", "(", "\"Multiple samples have the same na...
Create a new sample.
[ "Create", "a", "new", "sample", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/mishmash/__init__.py#L64-L79
damoti/django-postgres-schema
postgres_schema/models.py
get_schema_model
def get_schema_model(): """ Returns the schema model that is active in this project. """ try: return django_apps.get_model(settings.POSTGRES_SCHEMA_MODEL, require_ready=False) except ValueError: raise ImproperlyConfigured("POSTGRES_SCHEMA_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "POSTGRES_SCHEMA_MODEL refers to model '%s' that has not been installed" % settings.POSTGRES_SCHEMA_MODEL )
python
def get_schema_model(): """ Returns the schema model that is active in this project. """ try: return django_apps.get_model(settings.POSTGRES_SCHEMA_MODEL, require_ready=False) except ValueError: raise ImproperlyConfigured("POSTGRES_SCHEMA_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "POSTGRES_SCHEMA_MODEL refers to model '%s' that has not been installed" % settings.POSTGRES_SCHEMA_MODEL )
[ "def", "get_schema_model", "(", ")", ":", "try", ":", "return", "django_apps", ".", "get_model", "(", "settings", ".", "POSTGRES_SCHEMA_MODEL", ",", "require_ready", "=", "False", ")", "except", "ValueError", ":", "raise", "ImproperlyConfigured", "(", "\"POSTGRES_...
Returns the schema model that is active in this project.
[ "Returns", "the", "schema", "model", "that", "is", "active", "in", "this", "project", "." ]
train
https://github.com/damoti/django-postgres-schema/blob/49b7e721abaacc10ec281289df8a67bf8e8b50e0/postgres_schema/models.py#L18-L29
saghul/evergreen
evergreen/lib/ssl.py
SSLSocket.read
def read(self, len=1024): """Read up to LEN bytes and return them. Return zero-length string on EOF.""" while True: try: return self._sslobj.read(len) except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: return b'' elif ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout) else: raise
python
def read(self, len=1024): """Read up to LEN bytes and return them. Return zero-length string on EOF.""" while True: try: return self._sslobj.read(len) except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: return b'' elif ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorReadTimeout) else: raise
[ "def", "read", "(", "self", ",", "len", "=", "1024", ")", ":", "while", "True", ":", "try", ":", "return", "self", ".", "_sslobj", ".", "read", "(", "len", ")", "except", "SSLError", ":", "ex", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "...
Read up to LEN bytes and return them. Return zero-length string on EOF.
[ "Read", "up", "to", "LEN", "bytes", "and", "return", "them", ".", "Return", "zero", "-", "length", "string", "on", "EOF", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/lib/ssl.py#L105-L126
saghul/evergreen
evergreen/lib/ssl.py
SSLSocket.write
def write(self, data): """Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.""" while True: try: return self._sslobj.write(data) except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout) else: raise
python
def write(self, data): """Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.""" while True: try: return self._sslobj.write(data) except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorWriteTimeout) else: raise
[ "def", "write", "(", "self", ",", "data", ")", ":", "while", "True", ":", "try", ":", "return", "self", ".", "_sslobj", ".", "write", "(", "data", ")", "except", "SSLError", ":", "ex", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", ...
Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.
[ "Write", "DATA", "to", "the", "underlying", "SSL", "channel", ".", "Returns", "number", "of", "bytes", "of", "DATA", "actually", "transmitted", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/lib/ssl.py#L128-L147
saghul/evergreen
evergreen/lib/ssl.py
SSLSocket.do_handshake
def do_handshake(self): """Perform a TLS/SSL handshake.""" while True: try: return self._sslobj.do_handshake() except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) else: raise
python
def do_handshake(self): """Perform a TLS/SSL handshake.""" while True: try: return self._sslobj.do_handshake() except SSLError: ex = sys.exc_info()[1] if ex.args[0] == SSL_ERROR_WANT_READ: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) elif ex.args[0] == SSL_ERROR_WANT_WRITE: if self.timeout == 0.0: raise six.exc_clear() self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) else: raise
[ "def", "do_handshake", "(", "self", ")", ":", "while", "True", ":", "try", ":", "return", "self", ".", "_sslobj", ".", "do_handshake", "(", ")", "except", "SSLError", ":", "ex", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "ex", ".",...
Perform a TLS/SSL handshake.
[ "Perform", "a", "TLS", "/", "SSL", "handshake", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/lib/ssl.py#L288-L306
saghul/evergreen
evergreen/lib/ssl.py
SSLSocket.connect
def connect(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" # Here we assume that the socket is client-side, and not # connected at the time of the call. We connect it, then wrap it. if self._sslobj: raise ValueError("attempt to connect already-connected SSLSocket!") socket.connect(self, addr) if six.PY3: self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname) else: if self.ciphers is None: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs) else: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs, self.ciphers) if self.do_handshake_on_connect: self.do_handshake()
python
def connect(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" # Here we assume that the socket is client-side, and not # connected at the time of the call. We connect it, then wrap it. if self._sslobj: raise ValueError("attempt to connect already-connected SSLSocket!") socket.connect(self, addr) if six.PY3: self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname) else: if self.ciphers is None: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs) else: self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs, self.ciphers) if self.do_handshake_on_connect: self.do_handshake()
[ "def", "connect", "(", "self", ",", "addr", ")", ":", "# Here we assume that the socket is client-side, and not", "# connected at the time of the call. We connect it, then wrap it.", "if", "self", ".", "_sslobj", ":", "raise", "ValueError", "(", "\"attempt to connect already-con...
Connects to remote ADDR, and then wraps the connection in an SSL channel.
[ "Connects", "to", "remote", "ADDR", "and", "then", "wraps", "the", "connection", "in", "an", "SSL", "channel", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/lib/ssl.py#L308-L328
saghul/evergreen
evergreen/lib/ssl.py
SSLSocket.accept
def accept(self): """Accepts a new connection from a remote client, and returns a tuple containing that new connection wrapped with a server-side SSL channel, and the address of the remote client.""" newsock, addr = socket.accept(self) ssl_sock = SSLSocket(newsock._sock, keyfile=self.keyfile, certfile=self.certfile, server_side=True, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ca_certs=self.ca_certs, do_handshake_on_connect=self.do_handshake_on_connect, suppress_ragged_eofs=self.suppress_ragged_eofs, ciphers=self.ciphers) return ssl_sock, addr
python
def accept(self): """Accepts a new connection from a remote client, and returns a tuple containing that new connection wrapped with a server-side SSL channel, and the address of the remote client.""" newsock, addr = socket.accept(self) ssl_sock = SSLSocket(newsock._sock, keyfile=self.keyfile, certfile=self.certfile, server_side=True, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ca_certs=self.ca_certs, do_handshake_on_connect=self.do_handshake_on_connect, suppress_ragged_eofs=self.suppress_ragged_eofs, ciphers=self.ciphers) return ssl_sock, addr
[ "def", "accept", "(", "self", ")", ":", "newsock", ",", "addr", "=", "socket", ".", "accept", "(", "self", ")", "ssl_sock", "=", "SSLSocket", "(", "newsock", ".", "_sock", ",", "keyfile", "=", "self", ".", "keyfile", ",", "certfile", "=", "self", "."...
Accepts a new connection from a remote client, and returns a tuple containing that new connection wrapped with a server-side SSL channel, and the address of the remote client.
[ "Accepts", "a", "new", "connection", "from", "a", "remote", "client", "and", "returns", "a", "tuple", "containing", "that", "new", "connection", "wrapped", "with", "a", "server", "-", "side", "SSL", "channel", "and", "the", "address", "of", "the", "remote", ...
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/lib/ssl.py#L330-L345
Sliim/soundcloud-syncer
ssyncer/sclient.py
sclient.send_request
def send_request(self, url): """ Send a request to given url. """ while True: try: return urllib.request.urlopen(url) except urllib.error.HTTPError as e: raise serror( "Request `%s` failed (%s:%s)." % (url, e.__class__.__name__, e.code)) except Exception as e: choice = input(serror( "Error occured: %s - Retry? [yN]" % type(e))) if choice.strip().lower() != "y": raise serror(e)
python
def send_request(self, url): """ Send a request to given url. """ while True: try: return urllib.request.urlopen(url) except urllib.error.HTTPError as e: raise serror( "Request `%s` failed (%s:%s)." % (url, e.__class__.__name__, e.code)) except Exception as e: choice = input(serror( "Error occured: %s - Retry? [yN]" % type(e))) if choice.strip().lower() != "y": raise serror(e)
[ "def", "send_request", "(", "self", ",", "url", ")", ":", "while", "True", ":", "try", ":", "return", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "except", "urllib", ".", "error", ".", "HTTPError", "as", "e", ":", "raise", "serror", "(...
Send a request to given url.
[ "Send", "a", "request", "to", "given", "url", "." ]
train
https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/sclient.py#L54-L67
Sliim/soundcloud-syncer
ssyncer/sclient.py
sclient.get
def get(self, uri): """ Send a request to given uri. """ return self.send_request( "{0}://{1}:{2}{3}{4}".format( self.get_protocol(), self.host, self.port, uri, self.client_id ) )
python
def get(self, uri): """ Send a request to given uri. """ return self.send_request( "{0}://{1}:{2}{3}{4}".format( self.get_protocol(), self.host, self.port, uri, self.client_id ) )
[ "def", "get", "(", "self", ",", "uri", ")", ":", "return", "self", ".", "send_request", "(", "\"{0}://{1}:{2}{3}{4}\"", ".", "format", "(", "self", ".", "get_protocol", "(", ")", ",", "self", ".", "host", ",", "self", ".", "port", ",", "uri", ",", "s...
Send a request to given uri.
[ "Send", "a", "request", "to", "given", "uri", "." ]
train
https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/sclient.py#L69-L79
Sliim/soundcloud-syncer
ssyncer/sclient.py
sclient.get_client_id
def get_client_id(self): """ Attempt to get client_id from soundcloud homepage. """ # FIXME: This method doesn't works id = re.search( "\"clientID\":\"([a-z0-9]*)\"", self.send_request(self.SC_HOME).read().decode("utf-8")) if not id: raise serror("Cannot retrieve client_id.") return id.group(1)
python
def get_client_id(self): """ Attempt to get client_id from soundcloud homepage. """ # FIXME: This method doesn't works id = re.search( "\"clientID\":\"([a-z0-9]*)\"", self.send_request(self.SC_HOME).read().decode("utf-8")) if not id: raise serror("Cannot retrieve client_id.") return id.group(1)
[ "def", "get_client_id", "(", "self", ")", ":", "# FIXME: This method doesn't works", "id", "=", "re", ".", "search", "(", "\"\\\"clientID\\\":\\\"([a-z0-9]*)\\\"\"", ",", "self", ".", "send_request", "(", "self", ".", "SC_HOME", ")", ".", "read", "(", ")", ".", ...
Attempt to get client_id from soundcloud homepage.
[ "Attempt", "to", "get", "client_id", "from", "soundcloud", "homepage", "." ]
train
https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/sclient.py#L85-L95
tinybike/coinbridge
coinbridge/__init__.py
error_handler
def error_handler(task): """Handle and log RPC errors.""" @wraps(task) def wrapper(self, *args, **kwargs): try: return task(self, *args, **kwargs) except Exception as e: self.connected = False if not self.testing: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] error_message = ( "[" + str(datetime.now()) + "] Error in task \"" + task.__name__ + "\" (" + fname + "/" + str(exc_tb.tb_lineno) + ")" + e.message ) self.logger.error("%s: RPC instruction failed" % error_message) return wrapper
python
def error_handler(task): """Handle and log RPC errors.""" @wraps(task) def wrapper(self, *args, **kwargs): try: return task(self, *args, **kwargs) except Exception as e: self.connected = False if not self.testing: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] error_message = ( "[" + str(datetime.now()) + "] Error in task \"" + task.__name__ + "\" (" + fname + "/" + str(exc_tb.tb_lineno) + ")" + e.message ) self.logger.error("%s: RPC instruction failed" % error_message) return wrapper
[ "def", "error_handler", "(", "task", ")", ":", "@", "wraps", "(", "task", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "task", "(", "self", ",", "*", "args", ",", "*", "*", "kwar...
Handle and log RPC errors.
[ "Handle", "and", "log", "RPC", "errors", "." ]
train
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L63-L81
tinybike/coinbridge
coinbridge/__init__.py
Bridge.payment
def payment(self, origin, destination, amount): """Convenience method for sending Bitcoins. Send coins from origin to destination. Calls record_tx to log the transaction to database. Uses free, instant "move" transfers if addresses are both local (in the same wallet), and standard "sendfrom" transactions otherwise. The sender is required to be specified by user_id (account label); however, the recipient can be specified either by Bitcoin address (anyone) or user_id (if the user is local). Payment tries sending Bitcoins in this order: 1. "move" from account to account (local) 2. "move" from account to address (local) 3. "sendfrom" account to address (broadcast) Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send Returns: bool: True if successful, False otherwise """ if type(amount) != Decimal: amount = Decimal(amount) if amount <= 0: raise Exception("Amount must be a positive number") # Check if the destination is within the same wallet; # if so, we can use the fast (and free) "move" command all_addresses = [] accounts = self.listaccounts() if origin in accounts: if destination in accounts: with self.openwallet(): result = self.move(origin, destination, amount) return self.record_tx(origin, None, amount, result, destination) for account in accounts: addresses = self.getaddressesbyaccount(account) if destination in addresses: with self.openwallet(): result = self.move(origin, account, amount) return self.record_tx(origin, destination, amount, result, account) # Didn't find anything, so use "sendfrom" instead else: with self.openwallet(): txhash = self.sendfrom(origin, destination, amount) return self.record_tx(origin, destination, amount, txhash)
python
def payment(self, origin, destination, amount): """Convenience method for sending Bitcoins. Send coins from origin to destination. Calls record_tx to log the transaction to database. Uses free, instant "move" transfers if addresses are both local (in the same wallet), and standard "sendfrom" transactions otherwise. The sender is required to be specified by user_id (account label); however, the recipient can be specified either by Bitcoin address (anyone) or user_id (if the user is local). Payment tries sending Bitcoins in this order: 1. "move" from account to account (local) 2. "move" from account to address (local) 3. "sendfrom" account to address (broadcast) Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send Returns: bool: True if successful, False otherwise """ if type(amount) != Decimal: amount = Decimal(amount) if amount <= 0: raise Exception("Amount must be a positive number") # Check if the destination is within the same wallet; # if so, we can use the fast (and free) "move" command all_addresses = [] accounts = self.listaccounts() if origin in accounts: if destination in accounts: with self.openwallet(): result = self.move(origin, destination, amount) return self.record_tx(origin, None, amount, result, destination) for account in accounts: addresses = self.getaddressesbyaccount(account) if destination in addresses: with self.openwallet(): result = self.move(origin, account, amount) return self.record_tx(origin, destination, amount, result, account) # Didn't find anything, so use "sendfrom" instead else: with self.openwallet(): txhash = self.sendfrom(origin, destination, amount) return self.record_tx(origin, destination, amount, txhash)
[ "def", "payment", "(", "self", ",", "origin", ",", "destination", ",", "amount", ")", ":", "if", "type", "(", "amount", ")", "!=", "Decimal", ":", "amount", "=", "Decimal", "(", "amount", ")", "if", "amount", "<=", "0", ":", "raise", "Exception", "("...
Convenience method for sending Bitcoins. Send coins from origin to destination. Calls record_tx to log the transaction to database. Uses free, instant "move" transfers if addresses are both local (in the same wallet), and standard "sendfrom" transactions otherwise. The sender is required to be specified by user_id (account label); however, the recipient can be specified either by Bitcoin address (anyone) or user_id (if the user is local). Payment tries sending Bitcoins in this order: 1. "move" from account to account (local) 2. "move" from account to address (local) 3. "sendfrom" account to address (broadcast) Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send Returns: bool: True if successful, False otherwise
[ "Convenience", "method", "for", "sending", "Bitcoins", "." ]
train
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L128-L181
tinybike/coinbridge
coinbridge/__init__.py
Bridge.record_tx
def record_tx(self, origin, destination, amount, outcome, destination_id=None): """Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument """ # "move" commands if destination_id: tx = db.Transaction( txtype="move", from_user_id=origin, to_user_id=destination_id, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, ) # "sendfrom" commands else: self.logger.debug(self.gettransaction(outcome)) confirmations = self.gettransaction(outcome)["confirmations"] last_confirmation = datetime.now() if confirmations else None tx = db.Transaction( txtype="sendfrom", from_user_id=origin, txhash=outcome, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, confirmations=confirmations, last_confirmation=last_confirmation ) db.session.add(tx) db.session.commit() return outcome
python
def record_tx(self, origin, destination, amount, outcome, destination_id=None): """Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument """ # "move" commands if destination_id: tx = db.Transaction( txtype="move", from_user_id=origin, to_user_id=destination_id, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, ) # "sendfrom" commands else: self.logger.debug(self.gettransaction(outcome)) confirmations = self.gettransaction(outcome)["confirmations"] last_confirmation = datetime.now() if confirmations else None tx = db.Transaction( txtype="sendfrom", from_user_id=origin, txhash=outcome, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, confirmations=confirmations, last_confirmation=last_confirmation ) db.session.add(tx) db.session.commit() return outcome
[ "def", "record_tx", "(", "self", ",", "origin", ",", "destination", ",", "amount", ",", "outcome", ",", "destination_id", "=", "None", ")", ":", "# \"move\" commands", "if", "destination_id", ":", "tx", "=", "db", ".", "Transaction", "(", "txtype", "=", "\...
Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument
[ "Records", "a", "transaction", "in", "the", "database", "." ]
train
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L184-L231
tinybike/coinbridge
coinbridge/__init__.py
Bridge.rpc_connect
def rpc_connect(self): """Connect to a coin daemon's JSON RPC interface. Returns: bool: True if successfully connected, False otherwise. """ if self.coin in COINS: rpc_url = COINS[self.coin]["rpc-url"] + ":" if self.testnet: rpc_url += COINS[self.coin]["rpc-port-testnet"] else: rpc_url += COINS[self.coin]["rpc-port"] self.rpc = pyjsonrpc.HttpClient( url=rpc_url, username=COINS[self.coin]["rpc-user"], password=COINS[self.coin]["rpc-password"] ) self.logger.debug(self.coin, "RPC connection ok") self.connected = True else: self.logger.debug(self.coin, "bridge not found") return self.connected
python
def rpc_connect(self): """Connect to a coin daemon's JSON RPC interface. Returns: bool: True if successfully connected, False otherwise. """ if self.coin in COINS: rpc_url = COINS[self.coin]["rpc-url"] + ":" if self.testnet: rpc_url += COINS[self.coin]["rpc-port-testnet"] else: rpc_url += COINS[self.coin]["rpc-port"] self.rpc = pyjsonrpc.HttpClient( url=rpc_url, username=COINS[self.coin]["rpc-user"], password=COINS[self.coin]["rpc-password"] ) self.logger.debug(self.coin, "RPC connection ok") self.connected = True else: self.logger.debug(self.coin, "bridge not found") return self.connected
[ "def", "rpc_connect", "(", "self", ")", ":", "if", "self", ".", "coin", "in", "COINS", ":", "rpc_url", "=", "COINS", "[", "self", ".", "coin", "]", "[", "\"rpc-url\"", "]", "+", "\":\"", "if", "self", ".", "testnet", ":", "rpc_url", "+=", "COINS", ...
Connect to a coin daemon's JSON RPC interface. Returns: bool: True if successfully connected, False otherwise.
[ "Connect", "to", "a", "coin", "daemon", "s", "JSON", "RPC", "interface", "." ]
train
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L234-L256