repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
gapato/livestreamer-curses
src/livestreamer_curses/streamlist.py
StreamList.redraw_current_line
def redraw_current_line(self): """ Redraw the highlighted line """ if self.no_streams: return row = self.pads[self.current_pad].getyx()[0] s = self.filtered_streams[row] pad = self.pads['streams'] pad.move(row, 0) pad.clrtoeol() pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE) pad.chgat(curses.A_REVERSE) pad.move(row, 0) self.refresh_current_pad()
python
def redraw_current_line(self): """ Redraw the highlighted line """ if self.no_streams: return row = self.pads[self.current_pad].getyx()[0] s = self.filtered_streams[row] pad = self.pads['streams'] pad.move(row, 0) pad.clrtoeol() pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE) pad.chgat(curses.A_REVERSE) pad.move(row, 0) self.refresh_current_pad()
[ "def", "redraw_current_line", "(", "self", ")", ":", "if", "self", ".", "no_streams", ":", "return", "row", "=", "self", ".", "pads", "[", "self", ".", "current_pad", "]", ".", "getyx", "(", ")", "[", "0", "]", "s", "=", "self", ".", "filtered_stream...
Redraw the highlighted line
[ "Redraw", "the", "highlighted", "line" ]
train
https://github.com/gapato/livestreamer-curses/blob/d841a421422db8c5b5a8bcfcff6b3ddd7ea8a64b/src/livestreamer_curses/streamlist.py#L594-L606
fake-name/WebRequest
WebRequest/SeleniumModules/SeleniumChromiumMixin.py
WebGetSeleniumChromiumMixin.stepThroughJsWaf_selenium_chromium
def stepThroughJsWaf_selenium_chromium(self, url, titleContains='', titleNotContains=''): ''' Use Selenium+SeleniumChromium to access a resource behind cloudflare protection. Params: ``url`` - The URL to access that is protected by cloudflare ``titleContains`` - A string that is in the title of the protected page, and NOT the cloudflare intermediate page. The presence of this string in the page title is used to determine whether the cloudflare protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the cloudflare access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the cloudflare auth in normal requests. ''' if (not titleContains) and (not titleNotContains): raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!") if titleContains and titleNotContains: raise ValueError("You can only pass a single conditional statement!") self.log.info("Attempting to access page through cloudflare browser verification.") if not self.selenium_chromium_driver: self._initSeleniumChromiumWebDriver() self._syncIntoSeleniumChromiumWebDriver() self.selenium_chromium_driver.get(url) if titleContains: condition = EC.title_contains(titleContains) elif titleNotContains: condition = SeleniumCommon.title_not_contains(titleNotContains) else: raise ValueError("Wat?") try: WebDriverWait(self.selenium_chromium_driver, 45).until(condition) success = True self.log.info("Successfully accessed main page!") except TimeoutException: self.log.error("Could not pass through cloudflare blocking!") success = False # Add cookies to cookiejar self._syncOutOfSeleniumChromiumWebDriver() self._syncCookiesFromFile() return success
python
def stepThroughJsWaf_selenium_chromium(self, url, titleContains='', titleNotContains=''): ''' Use Selenium+SeleniumChromium to access a resource behind cloudflare protection. Params: ``url`` - The URL to access that is protected by cloudflare ``titleContains`` - A string that is in the title of the protected page, and NOT the cloudflare intermediate page. The presence of this string in the page title is used to determine whether the cloudflare protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the cloudflare access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the cloudflare auth in normal requests. ''' if (not titleContains) and (not titleNotContains): raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!") if titleContains and titleNotContains: raise ValueError("You can only pass a single conditional statement!") self.log.info("Attempting to access page through cloudflare browser verification.") if not self.selenium_chromium_driver: self._initSeleniumChromiumWebDriver() self._syncIntoSeleniumChromiumWebDriver() self.selenium_chromium_driver.get(url) if titleContains: condition = EC.title_contains(titleContains) elif titleNotContains: condition = SeleniumCommon.title_not_contains(titleNotContains) else: raise ValueError("Wat?") try: WebDriverWait(self.selenium_chromium_driver, 45).until(condition) success = True self.log.info("Successfully accessed main page!") except TimeoutException: self.log.error("Could not pass through cloudflare blocking!") success = False # Add cookies to cookiejar self._syncOutOfSeleniumChromiumWebDriver() self._syncCookiesFromFile() return success
[ "def", "stepThroughJsWaf_selenium_chromium", "(", "self", ",", "url", ",", "titleContains", "=", "''", ",", "titleNotContains", "=", "''", ")", ":", "if", "(", "not", "titleContains", ")", "and", "(", "not", "titleNotContains", ")", ":", "raise", "ValueError",...
Use Selenium+SeleniumChromium to access a resource behind cloudflare protection. Params: ``url`` - The URL to access that is protected by cloudflare ``titleContains`` - A string that is in the title of the protected page, and NOT the cloudflare intermediate page. The presence of this string in the page title is used to determine whether the cloudflare protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the cloudflare access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the cloudflare auth in normal requests.
[ "Use", "Selenium", "+", "SeleniumChromium", "to", "access", "a", "resource", "behind", "cloudflare", "protection", "." ]
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/SeleniumModules/SeleniumChromiumMixin.py#L156-L212
cs50/lib50
lib50/config.py
Loader.scope
def scope(self, key, *tags, default=None): """Only apply tags and default for top-level key, effectively scoping the tags.""" scope = self._scopes[key] tags = self._ensure_exclamation(tags) default = default if not default or default.startswith("!") else "!" + default if scope: scope[0] = scope[0] + tags scope[1] = default if default else scope[1] else: scope.append(tags) scope.append(default)
python
def scope(self, key, *tags, default=None): """Only apply tags and default for top-level key, effectively scoping the tags.""" scope = self._scopes[key] tags = self._ensure_exclamation(tags) default = default if not default or default.startswith("!") else "!" + default if scope: scope[0] = scope[0] + tags scope[1] = default if default else scope[1] else: scope.append(tags) scope.append(default)
[ "def", "scope", "(", "self", ",", "key", ",", "*", "tags", ",", "default", "=", "None", ")", ":", "scope", "=", "self", ".", "_scopes", "[", "key", "]", "tags", "=", "self", ".", "_ensure_exclamation", "(", "tags", ")", "default", "=", "default", "...
Only apply tags and default for top-level key, effectively scoping the tags.
[ "Only", "apply", "tags", "and", "default", "for", "top", "-", "level", "key", "effectively", "scoping", "the", "tags", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L47-L58
cs50/lib50
lib50/config.py
Loader.load
def load(self, content): """Parse yaml content.""" # Try parsing the YAML with global tags try: config = yaml.load(content, Loader=self._loader(self._global_tags)) except yaml.YAMLError: raise InvalidConfigError(_("Config is not valid yaml.")) # Try extracting just the tool portion try: config = config[self.tool] except (TypeError, KeyError): return None # If no scopes, just apply global default if not isinstance(config, dict): config = self._apply_default(config, self._global_default) else: # Figure out what scopes exist scoped_keys = set(key for key in self._scopes) # For every scope for key in config: # If scope has custom tags, apply if key in scoped_keys: # local tags, and local default tags, default = self._scopes[key] # Inherit global default if no local default if not default: default = self._global_default config[key] = self._apply_default(config[key], default) self._apply_scope(config[key], tags) # Otherwise just apply global default else: config[key] = self._apply_default(config[key], self._global_default) self._validate(config) return config
python
def load(self, content): """Parse yaml content.""" # Try parsing the YAML with global tags try: config = yaml.load(content, Loader=self._loader(self._global_tags)) except yaml.YAMLError: raise InvalidConfigError(_("Config is not valid yaml.")) # Try extracting just the tool portion try: config = config[self.tool] except (TypeError, KeyError): return None # If no scopes, just apply global default if not isinstance(config, dict): config = self._apply_default(config, self._global_default) else: # Figure out what scopes exist scoped_keys = set(key for key in self._scopes) # For every scope for key in config: # If scope has custom tags, apply if key in scoped_keys: # local tags, and local default tags, default = self._scopes[key] # Inherit global default if no local default if not default: default = self._global_default config[key] = self._apply_default(config[key], default) self._apply_scope(config[key], tags) # Otherwise just apply global default else: config[key] = self._apply_default(config[key], self._global_default) self._validate(config) return config
[ "def", "load", "(", "self", ",", "content", ")", ":", "# Try parsing the YAML with global tags", "try", ":", "config", "=", "yaml", ".", "load", "(", "content", ",", "Loader", "=", "self", ".", "_loader", "(", "self", ".", "_global_tags", ")", ")", "except...
Parse yaml content.
[ "Parse", "yaml", "content", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L60-L100
cs50/lib50
lib50/config.py
Loader._loader
def _loader(self, tags): """Create a yaml Loader.""" class ConfigLoader(SafeLoader): pass ConfigLoader.add_multi_constructor("", lambda loader, prefix, node: TaggedValue(node.value, node.tag, *tags)) return ConfigLoader
python
def _loader(self, tags): """Create a yaml Loader.""" class ConfigLoader(SafeLoader): pass ConfigLoader.add_multi_constructor("", lambda loader, prefix, node: TaggedValue(node.value, node.tag, *tags)) return ConfigLoader
[ "def", "_loader", "(", "self", ",", "tags", ")", ":", "class", "ConfigLoader", "(", "SafeLoader", ")", ":", "pass", "ConfigLoader", ".", "add_multi_constructor", "(", "\"\"", ",", "lambda", "loader", ",", "prefix", ",", "node", ":", "TaggedValue", "(", "no...
Create a yaml Loader.
[ "Create", "a", "yaml", "Loader", "." ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L102-L107
cs50/lib50
lib50/config.py
Loader._validate
def _validate(self, config): """Check whether every TaggedValue has a valid tag, otherwise raise InvalidConfigError""" if isinstance(config, dict): # Recursively validate each item in the config for val in config.values(): self._validate(val) elif isinstance(config, list): # Recursively validate each item in the config for item in config: self._validate(item) elif isinstance(config, TaggedValue): tagged_value = config # if tagged_value is invalid, error if tagged_value.tag not in tagged_value.tags: raise InvalidConfigError(_("{} is not a valid tag for {}".format(tagged_value.tag, self.tool)))
python
def _validate(self, config): """Check whether every TaggedValue has a valid tag, otherwise raise InvalidConfigError""" if isinstance(config, dict): # Recursively validate each item in the config for val in config.values(): self._validate(val) elif isinstance(config, list): # Recursively validate each item in the config for item in config: self._validate(item) elif isinstance(config, TaggedValue): tagged_value = config # if tagged_value is invalid, error if tagged_value.tag not in tagged_value.tags: raise InvalidConfigError(_("{} is not a valid tag for {}".format(tagged_value.tag, self.tool)))
[ "def", "_validate", "(", "self", ",", "config", ")", ":", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "# Recursively validate each item in the config", "for", "val", "in", "config", ".", "values", "(", ")", ":", "self", ".", "_validate", "(", ...
Check whether every TaggedValue has a valid tag, otherwise raise InvalidConfigError
[ "Check", "whether", "every", "TaggedValue", "has", "a", "valid", "tag", "otherwise", "raise", "InvalidConfigError" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L109-L126
cs50/lib50
lib50/config.py
Loader._apply_default
def _apply_default(self, config, default): """ Apply default value to every str in config. Also ensure every TaggedValue has default in .tags """ # No default, nothing to be done here if not default: return config # If the entire config is just a string, return default TaggedValue if isinstance(config, str): return TaggedValue(config, default, default, *self._global_tags) if isinstance(config, dict): # Recursively apply defaults for each item in the config for key, val in config.items(): config[key] = self._apply_default(val, default) elif isinstance(config, list): # Recursively apply defaults for each item in the config for i, val in enumerate(config): config[i] = self._apply_default(val, default) elif isinstance(config, TaggedValue): # Make sure each TaggedValue knows about the default tag config.tags.add(default) return config
python
def _apply_default(self, config, default): """ Apply default value to every str in config. Also ensure every TaggedValue has default in .tags """ # No default, nothing to be done here if not default: return config # If the entire config is just a string, return default TaggedValue if isinstance(config, str): return TaggedValue(config, default, default, *self._global_tags) if isinstance(config, dict): # Recursively apply defaults for each item in the config for key, val in config.items(): config[key] = self._apply_default(val, default) elif isinstance(config, list): # Recursively apply defaults for each item in the config for i, val in enumerate(config): config[i] = self._apply_default(val, default) elif isinstance(config, TaggedValue): # Make sure each TaggedValue knows about the default tag config.tags.add(default) return config
[ "def", "_apply_default", "(", "self", ",", "config", ",", "default", ")", ":", "# No default, nothing to be done here", "if", "not", "default", ":", "return", "config", "# If the entire config is just a string, return default TaggedValue", "if", "isinstance", "(", "config",...
Apply default value to every str in config. Also ensure every TaggedValue has default in .tags
[ "Apply", "default", "value", "to", "every", "str", "in", "config", ".", "Also", "ensure", "every", "TaggedValue", "has", "default", "in", ".", "tags" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L128-L155
cs50/lib50
lib50/config.py
Loader._apply_scope
def _apply_scope(self, config, tags): """Add locally scoped tags to config""" if isinstance(config, dict): # Recursively _apply_scope for each item in the config for val in config.values(): self._apply_scope(val, tags) elif isinstance(config, list): # Recursively _apply_scope for each item in the config for item in config: self._apply_scope(item, tags) elif isinstance(config, TaggedValue): tagged_value = config # add all local tags tagged_value.tags |= set(tags) for tag in tags: if not hasattr(tagged_value, tag): setattr(tagged_value, tag, False)
python
def _apply_scope(self, config, tags): """Add locally scoped tags to config""" if isinstance(config, dict): # Recursively _apply_scope for each item in the config for val in config.values(): self._apply_scope(val, tags) elif isinstance(config, list): # Recursively _apply_scope for each item in the config for item in config: self._apply_scope(item, tags) elif isinstance(config, TaggedValue): tagged_value = config # add all local tags tagged_value.tags |= set(tags) for tag in tags: if not hasattr(tagged_value, tag): setattr(tagged_value, tag, False)
[ "def", "_apply_scope", "(", "self", ",", "config", ",", "tags", ")", ":", "if", "isinstance", "(", "config", ",", "dict", ")", ":", "# Recursively _apply_scope for each item in the config", "for", "val", "in", "config", ".", "values", "(", ")", ":", "self", ...
Add locally scoped tags to config
[ "Add", "locally", "scoped", "tags", "to", "config" ]
train
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/config.py#L157-L176
CodyKochmann/strict_functions
strict_functions/overload.py
Overload.has_args
def has_args(): ''' returns true if the decorator invocation had arguments passed to it before being sent a function to decorate ''' no_args_syntax = '@overload' args_syntax = no_args_syntax + '(' args, no_args = [(-1,-1)], [(-1,-1)] for i, line in enumerate(Overload.traceback_lines()): if args_syntax in line: args.append((i, line.find(args_syntax))) if no_args_syntax in line: no_args.append((i, line.find(no_args_syntax))) args, no_args = max(args), max(no_args) if sum(args)+sum(no_args) == -4: # couldnt find invocation return False return args >= no_args
python
def has_args(): ''' returns true if the decorator invocation had arguments passed to it before being sent a function to decorate ''' no_args_syntax = '@overload' args_syntax = no_args_syntax + '(' args, no_args = [(-1,-1)], [(-1,-1)] for i, line in enumerate(Overload.traceback_lines()): if args_syntax in line: args.append((i, line.find(args_syntax))) if no_args_syntax in line: no_args.append((i, line.find(no_args_syntax))) args, no_args = max(args), max(no_args) if sum(args)+sum(no_args) == -4: # couldnt find invocation return False return args >= no_args
[ "def", "has_args", "(", ")", ":", "no_args_syntax", "=", "'@overload'", "args_syntax", "=", "no_args_syntax", "+", "'('", "args", ",", "no_args", "=", "[", "(", "-", "1", ",", "-", "1", ")", "]", ",", "[", "(", "-", "1", ",", "-", "1", ")", "]", ...
returns true if the decorator invocation had arguments passed to it before being sent a function to decorate
[ "returns", "true", "if", "the", "decorator", "invocation", "had", "arguments", "passed", "to", "it", "before", "being", "sent", "a", "function", "to", "decorate" ]
train
https://github.com/CodyKochmann/strict_functions/blob/adaf78084c66929552d80c95f980e7e0c4331478/strict_functions/overload.py#L23-L39
CodyKochmann/strict_functions
strict_functions/overload.py
Overload.identify
def identify(fn): ''' returns a tuple that is used to match functions to their neighbors in their resident namespaces ''' return ( fn.__globals__['__name__'], # module namespace getattr(fn, '__qualname__', getattr(fn, '__name__', '')) # class and function namespace ) def __init__(self, fn): self.validate_function(fn) self.configured = False self.has_backup_plan = False if self.has_args(): self.backup_plan = fn else: self.id = self.identify(fn) self.backup_plan = big.overload._cache.get(self.id, None) #if self.id in overload._cache: # self.backup_plan = self.configure_with(fn) #wraps(fn)(self) def __call__(self, *args, **kwargs): #print(locals()) try: # try running like normal return self.fn(*args, **kwargs) except Exception as ex: if self.has_backup_plan: return self.backup_plan(*args, **kwargs) # run backup plan elif self.configured: raise ex # no backup plan, abort else: # complete unconfigured setup self.configure_with(*args, **kwargs) return self
python
def identify(fn): ''' returns a tuple that is used to match functions to their neighbors in their resident namespaces ''' return ( fn.__globals__['__name__'], # module namespace getattr(fn, '__qualname__', getattr(fn, '__name__', '')) # class and function namespace ) def __init__(self, fn): self.validate_function(fn) self.configured = False self.has_backup_plan = False if self.has_args(): self.backup_plan = fn else: self.id = self.identify(fn) self.backup_plan = big.overload._cache.get(self.id, None) #if self.id in overload._cache: # self.backup_plan = self.configure_with(fn) #wraps(fn)(self) def __call__(self, *args, **kwargs): #print(locals()) try: # try running like normal return self.fn(*args, **kwargs) except Exception as ex: if self.has_backup_plan: return self.backup_plan(*args, **kwargs) # run backup plan elif self.configured: raise ex # no backup plan, abort else: # complete unconfigured setup self.configure_with(*args, **kwargs) return self
[ "def", "identify", "(", "fn", ")", ":", "return", "(", "fn", ".", "__globals__", "[", "'__name__'", "]", ",", "# module namespace", "getattr", "(", "fn", ",", "'__qualname__'", ",", "getattr", "(", "fn", ",", "'__name__'", ",", "''", ")", ")", "# class a...
returns a tuple that is used to match functions to their neighbors in their resident namespaces
[ "returns", "a", "tuple", "that", "is", "used", "to", "match", "functions", "to", "their", "neighbors", "in", "their", "resident", "namespaces" ]
train
https://github.com/CodyKochmann/strict_functions/blob/adaf78084c66929552d80c95f980e7e0c4331478/strict_functions/overload.py#L42-L76
CodyKochmann/strict_functions
strict_functions/overload.py
Overload.overload
def overload(fn, function_to_overload=None): ''' This function decorator allows you to overload already defined functions. The execution of overloaded functions is done by trying the original version first and if it fails, the variables are handed off to the overloading function. While this does seem like a sloppy way to go about choosing the execution of functions, this gives you far more control in terms of how you want each function to be selected and allows you to program for the "ideal situation" first. With this approach, you can simply require very specific conditions that would apply to a majority of the use cases of the function and allow the code to mitigate edge case scenarios only when the edge cases show up vs checking for edge cases on every single usage of the function. This approach rewards functions that are designed with proper input validation, which you should be adding anyways. #------------------------------------------------------------------------------ # Example Usage Below #------------------------------------------------------------------------------ def my_print(arg): print('running original my_print') print(arg) @overload def my_print(arg): assert type(arg) == list print('running list my_print') print(', '.join(str(i) for i in arg)) @overload def my_print(arg): assert type(arg) == dict print('running dict my_print') out = ('='.join((str(k), str(v))) for k,v in arg.items()) print(' | '.join(out)) my_print(list(range(10))) # running list my_print # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 my_print(tuple(range(10))) # running original my_print # (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) my_print({i:i*2 for i in range(10)}) # running dict my_print # 0=0 | 1=2 | 2=4 | 3=6 | 4=8 | 5=10 | 6=12 | 7=14 | 8=16 | 9=18 ''' if function_to_overload is None: if Overload.has_args(): return Overload.configured_decorator(fn) else: return Overload.default_decorator(fn) else: return Overload.configured_decorator(function_to_overload)(fn)
python
def overload(fn, function_to_overload=None): ''' This function decorator allows you to overload already defined functions. The execution of overloaded functions is done by trying the original version first and if it fails, the variables are handed off to the overloading function. While this does seem like a sloppy way to go about choosing the execution of functions, this gives you far more control in terms of how you want each function to be selected and allows you to program for the "ideal situation" first. With this approach, you can simply require very specific conditions that would apply to a majority of the use cases of the function and allow the code to mitigate edge case scenarios only when the edge cases show up vs checking for edge cases on every single usage of the function. This approach rewards functions that are designed with proper input validation, which you should be adding anyways. #------------------------------------------------------------------------------ # Example Usage Below #------------------------------------------------------------------------------ def my_print(arg): print('running original my_print') print(arg) @overload def my_print(arg): assert type(arg) == list print('running list my_print') print(', '.join(str(i) for i in arg)) @overload def my_print(arg): assert type(arg) == dict print('running dict my_print') out = ('='.join((str(k), str(v))) for k,v in arg.items()) print(' | '.join(out)) my_print(list(range(10))) # running list my_print # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 my_print(tuple(range(10))) # running original my_print # (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) my_print({i:i*2 for i in range(10)}) # running dict my_print # 0=0 | 1=2 | 2=4 | 3=6 | 4=8 | 5=10 | 6=12 | 7=14 | 8=16 | 9=18 ''' if function_to_overload is None: if Overload.has_args(): return Overload.configured_decorator(fn) else: return Overload.default_decorator(fn) else: return Overload.configured_decorator(function_to_overload)(fn)
[ "def", "overload", "(", "fn", ",", "function_to_overload", "=", "None", ")", ":", "if", "function_to_overload", "is", "None", ":", "if", "Overload", ".", "has_args", "(", ")", ":", "return", "Overload", ".", "configured_decorator", "(", "fn", ")", "else", ...
This function decorator allows you to overload already defined functions. The execution of overloaded functions is done by trying the original version first and if it fails, the variables are handed off to the overloading function. While this does seem like a sloppy way to go about choosing the execution of functions, this gives you far more control in terms of how you want each function to be selected and allows you to program for the "ideal situation" first. With this approach, you can simply require very specific conditions that would apply to a majority of the use cases of the function and allow the code to mitigate edge case scenarios only when the edge cases show up vs checking for edge cases on every single usage of the function. This approach rewards functions that are designed with proper input validation, which you should be adding anyways. #------------------------------------------------------------------------------ # Example Usage Below #------------------------------------------------------------------------------ def my_print(arg): print('running original my_print') print(arg) @overload def my_print(arg): assert type(arg) == list print('running list my_print') print(', '.join(str(i) for i in arg)) @overload def my_print(arg): assert type(arg) == dict print('running dict my_print') out = ('='.join((str(k), str(v))) for k,v in arg.items()) print(' | '.join(out)) my_print(list(range(10))) # running list my_print # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 my_print(tuple(range(10))) # running original my_print # (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) my_print({i:i*2 for i in range(10)}) # running dict my_print # 0=0 | 1=2 | 2=4 | 3=6 | 4=8 | 5=10 | 6=12 | 7=14 | 8=16 | 9=18
[ "This", "function", "decorator", "allows", "you", "to", "overload", "already", "defined", "functions", ".", "The", "execution", "of", "overloaded", "functions", "is", "done", "by", "trying", "the", "original", "version", "first", "and", "if", "it", "fails", "t...
train
https://github.com/CodyKochmann/strict_functions/blob/adaf78084c66929552d80c95f980e7e0c4331478/strict_functions/overload.py#L112-L171
MacHu-GWU/constant2-project
constant2/pkg/inspect_mate/getter.py
get_all_attributes
def get_all_attributes(klass_or_instance): """Get all attribute members (attribute, property style method). """ pairs = list() for attr, value in inspect.getmembers( klass_or_instance, lambda x: not inspect.isroutine(x)): if not (attr.startswith("__") or attr.endswith("__")): pairs.append((attr, value)) return pairs
python
def get_all_attributes(klass_or_instance): """Get all attribute members (attribute, property style method). """ pairs = list() for attr, value in inspect.getmembers( klass_or_instance, lambda x: not inspect.isroutine(x)): if not (attr.startswith("__") or attr.endswith("__")): pairs.append((attr, value)) return pairs
[ "def", "get_all_attributes", "(", "klass_or_instance", ")", ":", "pairs", "=", "list", "(", ")", "for", "attr", ",", "value", "in", "inspect", ".", "getmembers", "(", "klass_or_instance", ",", "lambda", "x", ":", "not", "inspect", ".", "isroutine", "(", "x...
Get all attribute members (attribute, property style method).
[ "Get", "all", "attribute", "members", "(", "attribute", "property", "style", "method", ")", "." ]
train
https://github.com/MacHu-GWU/constant2-project/blob/ccf7e14b0e23f9f4bfd13a3e2ce4a1142e570d4f/constant2/pkg/inspect_mate/getter.py#L71-L79
i3visio/entify
entify/lib/patterns/regexp.py
RegexpObject.getResults
def getResults(self, parFound = None): ''' Function to obtain the Dictionarythat represents this object. :param parFound: values to return. :return: The output format will be like: [{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }] ''' # Defining a dictionary results = [] # Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found. #results[self.name] = {"reg_exp" : self.reg_exp, "found_exp" : parFound} #results[self.name] = parFound if len(parFound ) >0: for found in parFound: aux = {} aux["type"] = self.name aux["value"] = found aux["attributes"] = self.getAttributes(found) results.append(aux) return results
python
def getResults(self, parFound = None): ''' Function to obtain the Dictionarythat represents this object. :param parFound: values to return. :return: The output format will be like: [{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }] ''' # Defining a dictionary results = [] # Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found. #results[self.name] = {"reg_exp" : self.reg_exp, "found_exp" : parFound} #results[self.name] = parFound if len(parFound ) >0: for found in parFound: aux = {} aux["type"] = self.name aux["value"] = found aux["attributes"] = self.getAttributes(found) results.append(aux) return results
[ "def", "getResults", "(", "self", ",", "parFound", "=", "None", ")", ":", "# Defining a dictionary", "results", "=", "[", "]", "# Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found.", "#results[self.name] =...
Function to obtain the Dictionary that represents this object. :param parFound: values to return. :return: The output format will be like: [{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }]
[ "Function", "to", "obtain", "the", "Dictionarythat", "represents", "this", "object", ".", ":", "param", "parFound", ":", "values", "to", "return", "." ]
train
https://github.com/i3visio/entify/blob/51c5b89cebee3a39d44d0918e2798739361f337c/entify/lib/patterns/regexp.py#L74-L95
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
get_requirement_from_url
def get_requirement_from_url(url):
    """Get a requirement from the URL, if possible. This looks for #egg
    in the URL"""
    link = Link(url)
    # Prefer an explicit #egg=... fragment; otherwise fall back to the
    # file name (minus its archive extension) as the project name.
    egg_info = link.egg_fragment or splitext(link.filename)[0]
    return package_to_requirement(egg_info)
python
def get_requirement_from_url(url): """Get a requirement from the URL, if possible. This looks for #egg in the URL""" link = Link(url) egg_info = link.egg_fragment if not egg_info: egg_info = splitext(link.filename)[0] return package_to_requirement(egg_info)
[ "def", "get_requirement_from_url", "(", "url", ")", ":", "link", "=", "Link", "(", "url", ")", "egg_info", "=", "link", ".", "egg_fragment", "if", "not", "egg_info", ":", "egg_info", "=", "splitext", "(", "link", ".", "filename", ")", "[", "0", "]", "r...
Get a requirement from the URL, if possible. This looks for #egg in the URL
[ "Get", "a", "requirement", "from", "the", "URL", "if", "possible", ".", "This", "looks", "for", "#egg", "in", "the", "URL" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L628-L635
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
package_to_requirement
def package_to_requirement(package_name):
    """Translate a name like Foo-1.2 to Foo==1.2

    The version part is either ``dev`` or anything starting with a
    digit; names without a recognizable version are returned as-is.
    (The old docstring's "Foo==1.3" example was wrong: the version is
    preserved, not bumped.)
    """
    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if match:
        name = match.group(1)
        version = match.group(2)
    else:
        name = package_name
        version = ''
    if version:
        return '%s==%s' % (name, version)
    else:
        return name
python
def package_to_requirement(package_name): """Translate a name like Foo-1.2 to Foo==1.3""" match = re.search(r'^(.*?)-(dev|\d.*)', package_name) if match: name = match.group(1) version = match.group(2) else: name = package_name version = '' if version: return '%s==%s' % (name, version) else: return name
[ "def", "package_to_requirement", "(", "package_name", ")", ":", "match", "=", "re", ".", "search", "(", "r'^(.*?)-(dev|\\d.*)'", ",", "package_name", ")", "if", "match", ":", "name", "=", "match", ".", "group", "(", "1", ")", "version", "=", "match", ".", ...
Translate a name like Foo-1.2 to Foo==1.2
[ "Translate", "a", "name", "like", "Foo", "-", "1", ".", "2", "to", "Foo", "==", "1", ".", "3" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L638-L650
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
get_mirrors
def get_mirrors(hostname=None):
    """Return the list of mirrors from the last record found on the DNS
    entry::

    >>> from pip.index import get_mirrors
    >>> get_mirrors()
    ['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
    'd.pypi.python.org']

    Originally written for the distutils2 project by Alexis Metaireau.
    """
    if hostname is None:
        hostname = DEFAULT_MIRROR_URL
    # Resolve the canonical name: the last registered mirror is encoded
    # in its leading label (e.g. "d.pypi.python.org" => mirrors a..d).
    try:
        hostname = socket.gethostbyname_ex(hostname)[0]
    except socket.gaierror:
        return []
    last_label, domain = hostname.split(".", 1)
    # Expand "a".."last_label" into full mirror host names.
    return ["%s.%s" % (label, domain) for label in string_range(last_label)]
python
def get_mirrors(hostname=None): """Return the list of mirrors from the last record found on the DNS entry:: >>> from pip.index import get_mirrors >>> get_mirrors() ['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org', 'd.pypi.python.org'] Originally written for the distutils2 project by Alexis Metaireau. """ if hostname is None: hostname = DEFAULT_MIRROR_URL # return the last mirror registered on PyPI. try: hostname = socket.gethostbyname_ex(hostname)[0] except socket.gaierror: return [] end_letter = hostname.split(".", 1) # determine the list from the last one. return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])]
[ "def", "get_mirrors", "(", "hostname", "=", "None", ")", ":", "if", "hostname", "is", "None", ":", "hostname", "=", "DEFAULT_MIRROR_URL", "# return the last mirror registered on PyPI.", "try", ":", "hostname", "=", "socket", ".", "gethostbyname_ex", "(", "hostname",...
Return the list of mirrors from the last record found on the DNS entry:: >>> from pip.index import get_mirrors >>> get_mirrors() ['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org', 'd.pypi.python.org'] Originally written for the distutils2 project by Alexis Metaireau.
[ "Return", "the", "list", "of", "mirrors", "from", "the", "last", "record", "found", "on", "the", "DNS", "entry", "::" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L653-L675
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
string_range
def string_range(last):
    """Compute the range of string between "a" and last.

    This works for simple "a to z" lists, but also for "a to zz" lists.
    """
    # Generate "a".."z", then "aa".."zz", etc., stopping (inclusively)
    # as soon as `last` is produced.
    for width in range(1, len(last) + 1):
        for letters in product(string.ascii_lowercase, repeat=width):
            candidate = ''.join(letters)
            yield candidate
            if candidate == last:
                return
python
def string_range(last): """Compute the range of string between "a" and last. This works for simple "a to z" lists, but also for "a to zz" lists. """ for k in range(len(last)): for x in product(string.ascii_lowercase, repeat=k+1): result = ''.join(x) yield result if result == last: return
[ "def", "string_range", "(", "last", ")", ":", "for", "k", "in", "range", "(", "len", "(", "last", ")", ")", ":", "for", "x", "in", "product", "(", "string", ".", "ascii_lowercase", ",", "repeat", "=", "k", "+", "1", ")", ":", "result", "=", "''",...
Compute the range of string between "a" and last. This works for simple "a to z" lists, but also for "a to zz" lists.
[ "Compute", "the", "range", "of", "string", "between", "a", "and", "last", "." ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L678-L688
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
PackageFinder._sort_locations
def _sort_locations(locations):
    """
    Sort locations into "files" (archives) and "urls", and return
    a pair of lists (files,urls)
    """
    files = []
    urls = []

    def sort_path(path):
        # Route a local path to `urls` (HTML index pages) or `files`
        # (candidate archives) based on its guessed MIME type.
        url = path_to_url2(path)
        if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
            urls.append(url)
        else:
            files.append(url)

    for url in locations:
        if not url.startswith('file:'):
            # Anything that isn't a file: URL is treated as an index URL.
            urls.append(url)
            continue
        path = url_to_path(url)
        if os.path.isdir(path):
            path = os.path.realpath(path)
            for item in os.listdir(path):
                sort_path(os.path.join(path, item))
        elif os.path.isfile(path):
            sort_path(path)
        # Nonexistent file: paths are silently dropped, as before.
    return files, urls
python
def _sort_locations(locations): """ Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls) """ files = [] urls = [] # puts the url for the given file path into the appropriate # list def sort_path(path): url = path_to_url2(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in locations: if url.startswith('file:'): path = url_to_path(url) if os.path.isdir(path): path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif os.path.isfile(path): sort_path(path) else: urls.append(url) return files, urls
[ "def", "_sort_locations", "(", "locations", ")", ":", "files", "=", "[", "]", "urls", "=", "[", "]", "# puts the url for the given file path into the appropriate", "# list", "def", "sort_path", "(", "path", ")", ":", "url", "=", "path_to_url2", "(", "path", ")",...
Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls)
[ "Sort", "locations", "into", "files", "(", "archives", ")", "and", "urls", "and", "return", "a", "pair", "of", "lists", "(", "files", "urls", ")" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L59-L87
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
PackageFinder._get_pages
def _get_pages(self, locations, req):
    """Yields (page, page_url) from the given locations, skipping
    locations that have errors, and adding download/homepage links"""
    pending_queue = Queue()
    for location in locations:
        pending_queue.put(location)
    done = []
    seen = set()
    # Fan out over at most 10 daemon threads; each worker drains the
    # queue via _get_queued_page, appending results to the shared list.
    workers = [
        threading.Thread(target=self._get_queued_page,
                         args=(req, pending_queue, done, seen))
        for _ in range(min(10, len(locations)))
    ]
    for worker in workers:
        worker.setDaemon(True)
        worker.start()
    for worker in workers:
        worker.join()
    return done
python
def _get_pages(self, locations, req): """Yields (page, page_url) from the given locations, skipping locations that have errors, and adding download/homepage links""" pending_queue = Queue() for location in locations: pending_queue.put(location) done = [] seen = set() threads = [] for i in range(min(10, len(locations))): t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen)) t.setDaemon(True) threads.append(t) t.start() for t in threads: t.join() return done
[ "def", "_get_pages", "(", "self", ",", "locations", ",", "req", ")", ":", "pending_queue", "=", "Queue", "(", ")", "for", "location", "in", "locations", ":", "pending_queue", ".", "put", "(", "location", ")", "done", "=", "[", "]", "seen", "=", "set", ...
Yields (page, page_url) from the given locations, skipping locations that have errors, and adding download/homepage links
[ "Yields", "(", "page", "page_url", ")", "from", "the", "given", "locations", "skipping", "locations", "that", "have", "errors", "and", "adding", "download", "/", "homepage", "links" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L210-L226
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
PackageFinder._link_package_versions
def _link_package_versions(self, link, search_name):
    """
    Return an iterable of triples (pkg_resources_version_key,
    link, python_version) that can be extracted from the given
    link.

    Meant to be overridden by subclasses, not called by clients.
    """
    if link.egg_fragment:
        # An explicit #egg=... fragment names the distribution directly.
        egg_info = link.egg_fragment
    else:
        egg_info, ext = link.splitext()
        if not ext:
            # Not a downloadable file at all; log each such link only once.
            if link not in self.logged_links:
                logger.debug('Skipping link %s; not a file' % link)
                self.logged_links.add(link)
            return []
        if egg_info.endswith('.tar'):
            # Special double-extension case:
            egg_info = egg_info[:-4]
            ext = '.tar' + ext
        if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
            if link not in self.logged_links:
                logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
                self.logged_links.add(link)
            return []
    version = self._egg_info_matches(egg_info, search_name, link)
    if version is None:
        logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
        return []
    match = self._py_version_re.search(version)
    if match:
        # A trailing -pyX.Y marker restricts the archive to one Python
        # version; drop it from the version string and compare.
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            logger.debug('Skipping %s because Python version is incorrect' % link)
            return []
    logger.debug('Found link %s, version: %s' % (link, version))
    return [(pkg_resources.parse_version(version), link, version)]
python
def _link_package_versions(self, link, search_name): """ Return an iterable of triples (pkg_resources_version_key, link, python_version) that can be extracted from the given link. Meant to be overridden by subclasses, not called by clients. """ if link.egg_fragment: egg_info = link.egg_fragment else: egg_info, ext = link.splitext() if not ext: if link not in self.logged_links: logger.debug('Skipping link %s; not a file' % link) self.logged_links.add(link) return [] if egg_info.endswith('.tar'): # Special double-extension case: egg_info = egg_info[:-4] ext = '.tar' + ext if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'): if link not in self.logged_links: logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext)) self.logged_links.add(link) return [] version = self._egg_info_matches(egg_info, search_name, link) if version is None: logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name)) return [] match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: logger.debug('Skipping %s because Python version is incorrect' % link) return [] logger.debug('Found link %s, version: %s' % (link, version)) return [(pkg_resources.parse_version(version), link, version)]
[ "def", "_link_package_versions", "(", "self", ",", "link", ",", "search_name", ")", ":", "if", "link", ".", "egg_fragment", ":", "egg_info", "=", "link", ".", "egg_fragment", "else", ":", "egg_info", ",", "ext", "=", "link", ".", "splitext", "(", ")", "i...
Return an iterable of triples (pkg_resources_version_key, link, python_version) that can be extracted from the given link. Meant to be overridden by subclasses, not called by clients.
[ "Return", "an", "iterable", "of", "triples", "(", "pkg_resources_version_key", "link", "python_version", ")", "that", "can", "be", "extracted", "from", "the", "given", "link", "." ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L268-L308
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
PackageFinder._get_mirror_urls
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None): """Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed. """ if not mirrors: mirrors = get_mirrors(main_mirror_url) # Should this be made "less random"? E.g. netselect like? random.shuffle(mirrors) mirror_urls = set() for mirror_url in mirrors: # Make sure we have a valid URL if not ("http://" or "https://" or "file://") in mirror_url: mirror_url = "http://%s" % mirror_url if not mirror_url.endswith("/simple"): mirror_url = "%s/simple/" % mirror_url mirror_urls.add(mirror_url) return list(mirror_urls)
python
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None): """Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed. """ if not mirrors: mirrors = get_mirrors(main_mirror_url) # Should this be made "less random"? E.g. netselect like? random.shuffle(mirrors) mirror_urls = set() for mirror_url in mirrors: # Make sure we have a valid URL if not ("http://" or "https://" or "file://") in mirror_url: mirror_url = "http://%s" % mirror_url if not mirror_url.endswith("/simple"): mirror_url = "%s/simple/" % mirror_url mirror_urls.add(mirror_url) return list(mirror_urls)
[ "def", "_get_mirror_urls", "(", "self", ",", "mirrors", "=", "None", ",", "main_mirror_url", "=", "None", ")", ":", "if", "not", "mirrors", ":", "mirrors", "=", "get_mirrors", "(", "main_mirror_url", ")", "# Should this be made \"less random\"? E.g. netselect like?", ...
Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed.
[ "Retrieves", "a", "list", "of", "URLs", "from", "the", "main", "mirror", "DNS", "entry", "unless", "a", "list", "of", "mirror", "URLs", "are", "passed", "." ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L326-L344
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
HTMLPage._get_content_type
def _get_content_type(url):
    """Get the Content-Type of the given url, using a HEAD request"""
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    if scheme not in ('http', 'https', 'ftp', 'ftps'):
        ## FIXME: some warning or something?
        ## assertion error?
        return ''
    resp = urlopen(Urllib2HeadRequest(url, headers={'Host': netloc}))
    try:
        # A non-200 HTTP(S) response yields no usable content type;
        # FTP responses carry no status code worth checking.
        bad_status = hasattr(resp, 'code') and resp.code != 200
        if bad_status and scheme not in ('ftp', 'ftps'):
            ## FIXME: doesn't handle redirects
            return ''
        return resp.info().get('content-type', '')
    finally:
        resp.close()
python
def _get_content_type(url): """Get the Content-Type of the given url, using a HEAD request""" scheme, netloc, path, query, fragment = urlparse.urlsplit(url) if not scheme in ('http', 'https', 'ftp', 'ftps'): ## FIXME: some warning or something? ## assertion error? return '' req = Urllib2HeadRequest(url, headers={'Host': netloc}) resp = urlopen(req) try: if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'): ## FIXME: doesn't handle redirects return '' return resp.info().get('content-type', '') finally: resp.close()
[ "def", "_get_content_type", "(", "url", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", "=", "urlparse", ".", "urlsplit", "(", "url", ")", "if", "not", "scheme", "in", "(", "'http'", ",", "'https'", ",", "'ftp'", ",", "...
Get the Content-Type of the given url, using a HEAD request
[ "Get", "the", "Content", "-", "Type", "of", "the", "given", "url", "using", "a", "HEAD", "request" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L477-L492
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
HTMLPage.links
def links(self): """Yields all links in the page""" for match in self._href_re.finditer(self.content): url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self)
python
def links(self): """Yields all links in the page""" for match in self._href_re.finditer(self.content): url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self)
[ "def", "links", "(", "self", ")", ":", "for", "match", "in", "self", ".", "_href_re", ".", "finditer", "(", "self", ".", "content", ")", ":", "url", "=", "match", ".", "group", "(", "1", ")", "or", "match", ".", "group", "(", "2", ")", "or", "m...
Yields all links in the page
[ "Yields", "all", "links", "in", "the", "page" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L505-L510
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py
HTMLPage.explicit_rel_links
def explicit_rel_links(self, rels=('homepage', 'download')): """Yields all links with the given relations""" for match in self._rel_re.finditer(self.content): found_rels = match.group(1).lower().split() for rel in rels: if rel in found_rels: break else: continue match = self._href_re.search(match.group(0)) if not match: continue url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self)
python
def explicit_rel_links(self, rels=('homepage', 'download')): """Yields all links with the given relations""" for match in self._rel_re.finditer(self.content): found_rels = match.group(1).lower().split() for rel in rels: if rel in found_rels: break else: continue match = self._href_re.search(match.group(0)) if not match: continue url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self)
[ "def", "explicit_rel_links", "(", "self", ",", "rels", "=", "(", "'homepage'", ",", "'download'", ")", ")", ":", "for", "match", "in", "self", ".", "_rel_re", ".", "finditer", "(", "self", ".", "content", ")", ":", "found_rels", "=", "match", ".", "gro...
Yields all links with the given relations
[ "Yields", "all", "links", "with", "the", "given", "relations" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/index.py#L518-L532
benley/butcher
butcher/targets/base.py
BaseBuilder._metahash
def _metahash(self): """Checksum hash of all the inputs to this rule. Output is invalid until collect_srcs and collect_deps have been run. In theory, if this hash doesn't change, the outputs won't change either, which makes it useful for caching. """ # BE CAREFUL when overriding/extending this method. You want to copy # the if(cached)/return(cached) part, then call this method, then at # the end update the cached metahash. Just like this code, basically, # only you call the method from the base class in the middle of it. If # you get this wrong it could result in butcher not noticing changed # inputs between runs, which could cause really nasty problems. # TODO(ben): the above warning seems avoidable with better memoization if self._cached_metahash: return self._cached_metahash # If you are extending this function in a subclass, # here is where you do: # BaseBuilder._metahash(self) log.debug('[%s]: Metahash input: %s', self.address, unicode(self.address)) mhash = util.hash_str(unicode(self.address)) log.debug('[%s]: Metahash input: %s', self.address, self.rule.params) mhash = util.hash_str(str(self.rule.params), hasher=mhash) for src in self.rule.source_files or []: log.debug('[%s]: Metahash input: %s', self.address, src) mhash = util.hash_str(src, hasher=mhash) mhash = util.hash_file(self.srcs_map[src], hasher=mhash) for dep in self.rule.composed_deps() or []: dep_rule = self.rule.subgraph.node[dep]['target_obj'] for item in dep_rule.output_files: log.debug('[%s]: Metahash input: %s', self.address, item) item_path = os.path.join(self.buildroot, item) mhash = util.hash_str(item, hasher=mhash) mhash = util.hash_file(item_path, hasher=mhash) self._cached_metahash = mhash return mhash
python
def _metahash(self): """Checksum hash of all the inputs to this rule. Output is invalid until collect_srcs and collect_deps have been run. In theory, if this hash doesn't change, the outputs won't change either, which makes it useful for caching. """ # BE CAREFUL when overriding/extending this method. You want to copy # the if(cached)/return(cached) part, then call this method, then at # the end update the cached metahash. Just like this code, basically, # only you call the method from the base class in the middle of it. If # you get this wrong it could result in butcher not noticing changed # inputs between runs, which could cause really nasty problems. # TODO(ben): the above warning seems avoidable with better memoization if self._cached_metahash: return self._cached_metahash # If you are extending this function in a subclass, # here is where you do: # BaseBuilder._metahash(self) log.debug('[%s]: Metahash input: %s', self.address, unicode(self.address)) mhash = util.hash_str(unicode(self.address)) log.debug('[%s]: Metahash input: %s', self.address, self.rule.params) mhash = util.hash_str(str(self.rule.params), hasher=mhash) for src in self.rule.source_files or []: log.debug('[%s]: Metahash input: %s', self.address, src) mhash = util.hash_str(src, hasher=mhash) mhash = util.hash_file(self.srcs_map[src], hasher=mhash) for dep in self.rule.composed_deps() or []: dep_rule = self.rule.subgraph.node[dep]['target_obj'] for item in dep_rule.output_files: log.debug('[%s]: Metahash input: %s', self.address, item) item_path = os.path.join(self.buildroot, item) mhash = util.hash_str(item, hasher=mhash) mhash = util.hash_file(item_path, hasher=mhash) self._cached_metahash = mhash return mhash
[ "def", "_metahash", "(", "self", ")", ":", "# BE CAREFUL when overriding/extending this method. You want to copy", "# the if(cached)/return(cached) part, then call this method, then at", "# the end update the cached metahash. Just like this code, basically,", "# only you call the method from the b...
Checksum hash of all the inputs to this rule. Output is invalid until collect_srcs and collect_deps have been run. In theory, if this hash doesn't change, the outputs won't change either, which makes it useful for caching.
[ "Checksum", "hash", "of", "all", "the", "inputs", "to", "this", "rule", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L70-L111
benley/butcher
butcher/targets/base.py
BaseBuilder.collect_outs
def collect_outs(self):
    """Collect and store the outputs from this rule."""
    # TODO: this should probably live in CacheManager.
    for relpath in self.rule.output_files or []:
        built_path = os.path.join(self.buildroot, relpath)
        if not os.path.exists(built_path):
            raise error.TargetBuildFailed(
                self.address, 'Output file is missing: %s' % relpath)
        #git_sha = gitrepo.RepoState().GetRepo(self.address.repo).repo.commit()
        # git_sha is insufficient, and is actually not all that useful.
        # More factors to include in hash:
        # - commit/state of source repo of all dependencies
        #   (or all input files?)
        #   - Actually I like that idea: hash all the input files!
        # - versions of build tools used (?)
        digest = self._metahash()   # memoized after the first call
        log.debug('[%s]: Metahash: %s', self.address, digest.hexdigest())
        # TODO: record git repo state and buildoptions in cachemgr
        # TODO: move cachemgr to outer controller(?)
        self.cachemgr.putfile(built_path, self.buildroot, digest)
python
def collect_outs(self): """Collect and store the outputs from this rule.""" # TODO: this should probably live in CacheManager. for outfile in self.rule.output_files or []: outfile_built = os.path.join(self.buildroot, outfile) if not os.path.exists(outfile_built): raise error.TargetBuildFailed( self.address, 'Output file is missing: %s' % outfile) #git_sha = gitrepo.RepoState().GetRepo(self.address.repo).repo.commit() # git_sha is insufficient, and is actually not all that useful. # More factors to include in hash: # - commit/state of source repo of all dependencies # (or all input files?) # - Actually I like that idea: hash all the input files! # - versions of build tools used (?) metahash = self._metahash() log.debug('[%s]: Metahash: %s', self.address, metahash.hexdigest()) # TODO: record git repo state and buildoptions in cachemgr # TODO: move cachemgr to outer controller(?) self.cachemgr.putfile(outfile_built, self.buildroot, metahash)
[ "def", "collect_outs", "(", "self", ")", ":", "# TODO: this should probably live in CacheManager.", "for", "outfile", "in", "self", ".", "rule", ".", "output_files", "or", "[", "]", ":", "outfile_built", "=", "os", ".", "path", ".", "join", "(", "self", ".", ...
Collect and store the outputs from this rule.
[ "Collect", "and", "store", "the", "outputs", "from", "this", "rule", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L113-L133
benley/butcher
butcher/targets/base.py
BaseBuilder.is_cached
def is_cached(self):
    """Returns true if this rule is already cached."""
    # TODO: cache by target+hash, not per file.
    # Every output file must hit the cache; the first miss aborts.
    try:
        for outfile in self.rule.output_files:
            log.info(outfile)
            self.cachemgr.in_cache(outfile, self._metahash())
    except cache.CacheMiss:
        log.info('[%s]: Not cached.', self.address)
        return False
    log.info('[%s]: found in cache.', self.address)
    return True
python
def is_cached(self): """Returns true if this rule is already cached.""" # TODO: cache by target+hash, not per file. try: for item in self.rule.output_files: log.info(item) self.cachemgr.in_cache(item, self._metahash()) except cache.CacheMiss: log.info('[%s]: Not cached.', self.address) return False else: log.info('[%s]: found in cache.', self.address) return True
[ "def", "is_cached", "(", "self", ")", ":", "# TODO: cache by target+hash, not per file.", "try", ":", "for", "item", "in", "self", ".", "rule", ".", "output_files", ":", "log", ".", "info", "(", "item", ")", "self", ".", "cachemgr", ".", "in_cache", "(", "...
Returns true if this rule is already cached.
[ "Returns", "true", "if", "this", "rule", "is", "already", "cached", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L139-L151
benley/butcher
butcher/targets/base.py
BaseBuilder.get_from_cache
def get_from_cache(self):
    """See if this rule has already been built and cached."""
    # Materialize each cached output into the build root.
    for outfile in self.rule.output_files:
        cached_path = self.cachemgr.path_in_cache(outfile, self._metahash())
        self.linkorcopy(cached_path, os.path.join(self.buildroot, outfile))
python
def get_from_cache(self): """See if this rule has already been built and cached.""" for item in self.rule.output_files: dstpath = os.path.join(self.buildroot, item) self.linkorcopy( self.cachemgr.path_in_cache(item, self._metahash()), dstpath)
[ "def", "get_from_cache", "(", "self", ")", ":", "for", "item", "in", "self", ".", "rule", ".", "output_files", ":", "dstpath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "buildroot", ",", "item", ")", "self", ".", "linkorcopy", "(", "self...
See if this rule has already been built and cached.
[ "See", "if", "this", "rule", "has", "already", "been", "built", "and", "cached", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L153-L159
benley/butcher
butcher/targets/base.py
BaseBuilder.linkorcopy
def linkorcopy(self, src, dst):
    """hardlink src file to dst if possible, otherwise copy."""
    if os.path.isdir(dst):
        # A directory destination is suspicious; warn but continue.
        log.warn('linkorcopy given a directory as destination. Use caution.')
        log.debug('src: %s dst: %s', src, dst)
    elif os.path.exists(dst):
        # Replace any stale file already at the destination.
        os.unlink(dst)
    elif not os.path.exists(os.path.dirname(dst)):
        os.makedirs(os.path.dirname(dst))
    if self.linkfiles:
        log.debug('Linking: %s -> %s', src, dst)
        os.link(src, dst)
    else:
        log.debug('Copying: %s -> %s', src, dst)
        shutil.copy2(src, dst)
python
def linkorcopy(self, src, dst): """hardlink src file to dst if possible, otherwise copy.""" if os.path.isdir(dst): log.warn('linkorcopy given a directory as destination. ' 'Use caution.') log.debug('src: %s dst: %s', src, dst) elif os.path.exists(dst): os.unlink(dst) elif not os.path.exists(os.path.dirname(dst)): os.makedirs(os.path.dirname(dst)) if self.linkfiles: log.debug('Linking: %s -> %s', src, dst) os.link(src, dst) else: log.debug('Copying: %s -> %s', src, dst) shutil.copy2(src, dst)
[ "def", "linkorcopy", "(", "self", ",", "src", ",", "dst", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "dst", ")", ":", "log", ".", "warn", "(", "'linkorcopy given a directory as destination. '", "'Use caution.'", ")", "log", ".", "debug", "(", ...
hardlink src file to dst if possible, otherwise copy.
[ "hardlink", "src", "file", "to", "dst", "if", "possible", "otherwise", "copy", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L162-L177
benley/butcher
butcher/targets/base.py
BaseBuilder.rulefor
def rulefor(self, addr): """Return the rule object for an address from our deps graph.""" return self.rule.subgraph.node[self.rule.makeaddress(addr)][ 'target_obj']
python
def rulefor(self, addr): """Return the rule object for an address from our deps graph.""" return self.rule.subgraph.node[self.rule.makeaddress(addr)][ 'target_obj']
[ "def", "rulefor", "(", "self", ",", "addr", ")", ":", "return", "self", ".", "rule", ".", "subgraph", ".", "node", "[", "self", ".", "rule", ".", "makeaddress", "(", "addr", ")", "]", "[", "'target_obj'", "]" ]
Return the rule object for an address from our deps graph.
[ "Return", "the", "rule", "object", "for", "an", "address", "from", "our", "deps", "graph", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L183-L186
benley/butcher
butcher/targets/base.py
BaseTarget.validate_args
def validate_args(self): """Input validation!""" def validate_name(): allowed_re = '^[a-z](([a-z0-9_-]+)?([a-z0-9])?)?' assert isinstance(self.params['name'], basestring), ( 'Name must be a string, not %s' % repr(self.params['name'])) assert re.match(allowed_re, self.params['name']), ( 'Invalid rule name: %s. Must match %s.' % ( repr(self.params['name']), repr(allowed_re))) validate_name() def validate_deps(): if 'deps' in self.params: assert type(self.params['deps']) in (type(None), list), ( 'Deps must be a list, not %s' % repr(self.params['deps'])) validate_deps()
python
def validate_args(self): """Input validation!""" def validate_name(): allowed_re = '^[a-z](([a-z0-9_-]+)?([a-z0-9])?)?' assert isinstance(self.params['name'], basestring), ( 'Name must be a string, not %s' % repr(self.params['name'])) assert re.match(allowed_re, self.params['name']), ( 'Invalid rule name: %s. Must match %s.' % ( repr(self.params['name']), repr(allowed_re))) validate_name() def validate_deps(): if 'deps' in self.params: assert type(self.params['deps']) in (type(None), list), ( 'Deps must be a list, not %s' % repr(self.params['deps'])) validate_deps()
[ "def", "validate_args", "(", "self", ")", ":", "def", "validate_name", "(", ")", ":", "allowed_re", "=", "'^[a-z](([a-z0-9_-]+)?([a-z0-9])?)?'", "assert", "isinstance", "(", "self", ".", "params", "[", "'name'", "]", ",", "basestring", ")", ",", "(", "'Name mu...
Input validation!
[ "Input", "validation!" ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L267-L282
benley/butcher
butcher/targets/base.py
BaseTarget.composed_deps
def composed_deps(self): """Dependencies of this build target.""" if 'deps' in self.params: param_deps = self.params['deps'] or [] deps = [self.makeaddress(dep) for dep in param_deps] return deps else: return None
python
def composed_deps(self): """Dependencies of this build target.""" if 'deps' in self.params: param_deps = self.params['deps'] or [] deps = [self.makeaddress(dep) for dep in param_deps] return deps else: return None
[ "def", "composed_deps", "(", "self", ")", ":", "if", "'deps'", "in", "self", ".", "params", ":", "param_deps", "=", "self", ".", "params", "[", "'deps'", "]", "or", "[", "]", "deps", "=", "[", "self", ".", "makeaddress", "(", "dep", ")", "for", "de...
Dependencies of this build target.
[ "Dependencies", "of", "this", "build", "target", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L294-L301
benley/butcher
butcher/targets/base.py
BaseTarget.source_files
def source_files(self): """This rule's source files.""" if 'srcs' in self.params and self.params['srcs'] is not None: return util.flatten(self.params['srcs'])
python
def source_files(self): """This rule's source files.""" if 'srcs' in self.params and self.params['srcs'] is not None: return util.flatten(self.params['srcs'])
[ "def", "source_files", "(", "self", ")", ":", "if", "'srcs'", "in", "self", ".", "params", "and", "self", ".", "params", "[", "'srcs'", "]", "is", "not", "None", ":", "return", "util", ".", "flatten", "(", "self", ".", "params", "[", "'srcs'", "]", ...
This rule's source files.
[ "This", "rule", "s", "source", "files", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L304-L307
benley/butcher
butcher/targets/base.py
BaseTarget.makeaddress
def makeaddress(self, label): """Turn a label into an Address with current context. Adds repo and path if given a label that only has a :target part. """ addr = address.new(label) if not addr.repo: addr.repo = self.address.repo if not addr.path: addr.path = self.address.path return addr
python
def makeaddress(self, label): """Turn a label into an Address with current context. Adds repo and path if given a label that only has a :target part. """ addr = address.new(label) if not addr.repo: addr.repo = self.address.repo if not addr.path: addr.path = self.address.path return addr
[ "def", "makeaddress", "(", "self", ",", "label", ")", ":", "addr", "=", "address", ".", "new", "(", "label", ")", "if", "not", "addr", ".", "repo", ":", "addr", ".", "repo", "=", "self", ".", "address", ".", "repo", "if", "not", "addr", ".", "pat...
Turn a label into an Address with current context. Adds repo and path if given a label that only has a :target part.
[ "Turn", "a", "label", "into", "an", "Address", "with", "current", "context", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/base.py#L309-L319
chewse/djangorestframework-signed-permissions
signedpermissions/views.py
SignedViewSetMixin.get_queryset
def get_queryset(self): """Return the allowed queryset for this sign or the default one.""" if 'sign' in self.request.query_params: try: filter_and_actions = unsign_filters_and_actions( self.request.query_params['sign'], '{}.{}'.format( self.queryset.model._meta.app_label, self.queryset.model._meta.model_name, ) ) except signing.BadSignature: return super(SignedViewSetMixin, self).get_queryset() else: for filtered_action in filter_and_actions: try: qs = self.queryset.filter(**filtered_action['filters']) except FieldError: continue return qs return super(SignedViewSetMixin, self).get_queryset()
python
def get_queryset(self): """Return the allowed queryset for this sign or the default one.""" if 'sign' in self.request.query_params: try: filter_and_actions = unsign_filters_and_actions( self.request.query_params['sign'], '{}.{}'.format( self.queryset.model._meta.app_label, self.queryset.model._meta.model_name, ) ) except signing.BadSignature: return super(SignedViewSetMixin, self).get_queryset() else: for filtered_action in filter_and_actions: try: qs = self.queryset.filter(**filtered_action['filters']) except FieldError: continue return qs return super(SignedViewSetMixin, self).get_queryset()
[ "def", "get_queryset", "(", "self", ")", ":", "if", "'sign'", "in", "self", ".", "request", ".", "query_params", ":", "try", ":", "filter_and_actions", "=", "unsign_filters_and_actions", "(", "self", ".", "request", ".", "query_params", "[", "'sign'", "]", "...
Return the allowed queryset for this sign or the default one.
[ "Return", "the", "allowed", "queryset", "for", "this", "sign", "or", "the", "default", "one", "." ]
train
https://github.com/chewse/djangorestframework-signed-permissions/blob/b1cc4c57999fc5be8361f60f0ada1d777b27feab/signedpermissions/views.py#L16-L36
scieloorg/processing
utils.py
slugify
def slugify(value, allow_unicode=False): """ Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace. """ value if allow_unicode: value = unicodedata.normalize('NFKC', value) else: value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') value = re.sub(r'[^\w\s-]', '', value).strip().lower() return re.sub(r'[-\s]+', '-', value)
python
def slugify(value, allow_unicode=False): """ Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace. """ value if allow_unicode: value = unicodedata.normalize('NFKC', value) else: value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') value = re.sub(r'[^\w\s-]', '', value).strip().lower() return re.sub(r'[-\s]+', '-', value)
[ "def", "slugify", "(", "value", ",", "allow_unicode", "=", "False", ")", ":", "value", "if", "allow_unicode", ":", "value", "=", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "value", ")", "else", ":", "value", "=", "unicodedata", ".", "normalize", ...
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace.
[ "Convert", "to", "ASCII", "if", "allow_unicode", "is", "False", ".", "Convert", "spaces", "to", "hyphens", ".", "Remove", "characters", "that", "aren", "t", "alphanumerics", "underscores", "or", "hyphens", ".", "Convert", "to", "lowercase", ".", "Also", "strip...
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/utils.py#L39-L52
scieloorg/processing
utils.py
split_date
def split_date(value): """ This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','') """ if not is_valid_date(value): return ('', '', '') splited = value.split('-') try: year = splited[0] except IndexError: year = '' try: month = splited[1] except IndexError: month = '' try: day = splited[2] except IndexError: day = '' return (year, month, day)
python
def split_date(value): """ This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','') """ if not is_valid_date(value): return ('', '', '') splited = value.split('-') try: year = splited[0] except IndexError: year = '' try: month = splited[1] except IndexError: month = '' try: day = splited[2] except IndexError: day = '' return (year, month, day)
[ "def", "split_date", "(", "value", ")", ":", "if", "not", "is_valid_date", "(", "value", ")", ":", "return", "(", "''", ",", "''", ",", "''", ")", "splited", "=", "value", ".", "split", "(", "'-'", ")", "try", ":", "year", "=", "splited", "[", "0...
This method splits a date in a tuple. value: valid iso date ex: 2016-01-31: ('2016','01','01') 2016-01: ('2016','01','') 2016: ('2016','','')
[ "This", "method", "splits", "a", "date", "in", "a", "tuple", ".", "value", ":", "valid", "iso", "date" ]
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/utils.py#L171-L201
scieloorg/processing
utils.py
Configuration.items
def items(self): """Settings as key-value pair. """ return [(section, dict(self.conf.items(section, raw=True))) for \ section in [section for section in self.conf.sections()]]
python
def items(self): """Settings as key-value pair. """ return [(section, dict(self.conf.items(section, raw=True))) for \ section in [section for section in self.conf.sections()]]
[ "def", "items", "(", "self", ")", ":", "return", "[", "(", "section", ",", "dict", "(", "self", ".", "conf", ".", "items", "(", "section", ",", "raw", "=", "True", ")", ")", ")", "for", "section", "in", "[", "section", "for", "section", "in", "se...
Settings as key-value pair.
[ "Settings", "as", "key", "-", "value", "pair", "." ]
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/utils.py#L116-L120
aerogear/digger-build-cli
digger/helpers/android.py
jarsign
def jarsign(storepass, keypass, keystore, source, alias, path=None): """ Uses Jarsign to sign an apk target file using the provided keystore information. :param storepass(str) - keystore storepass :param keypass(str) - keystore keypass :param keystore(str) - keystore file path :param source(str) - apk path :param alias(str) - keystore alias :param path(str) - basedir to run the command """ cmd = [ 'jarsigner', '-verbose', '-storepass', storepass, '-keypass', keypass, '-keystore', keystore, source, alias ] common.run_cmd(cmd, log='jarsign.log', cwd=path)
python
def jarsign(storepass, keypass, keystore, source, alias, path=None): """ Uses Jarsign to sign an apk target file using the provided keystore information. :param storepass(str) - keystore storepass :param keypass(str) - keystore keypass :param keystore(str) - keystore file path :param source(str) - apk path :param alias(str) - keystore alias :param path(str) - basedir to run the command """ cmd = [ 'jarsigner', '-verbose', '-storepass', storepass, '-keypass', keypass, '-keystore', keystore, source, alias ] common.run_cmd(cmd, log='jarsign.log', cwd=path)
[ "def", "jarsign", "(", "storepass", ",", "keypass", ",", "keystore", ",", "source", ",", "alias", ",", "path", "=", "None", ")", ":", "cmd", "=", "[", "'jarsigner'", ",", "'-verbose'", ",", "'-storepass'", ",", "storepass", ",", "'-keypass'", ",", "keypa...
Uses Jarsign to sign an apk target file using the provided keystore information. :param storepass(str) - keystore storepass :param keypass(str) - keystore keypass :param keystore(str) - keystore file path :param source(str) - apk path :param alias(str) - keystore alias :param path(str) - basedir to run the command
[ "Uses", "Jarsign", "to", "sign", "an", "apk", "target", "file", "using", "the", "provided", "keystore", "information", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/helpers/android.py#L8-L31
aerogear/digger-build-cli
digger/helpers/android.py
zipalign
def zipalign(source, dist, build_tool=None, version='4', path=None): """ Uses zipalign based on a provided build tool version (defaulit is 23.0.2). :param source(str) - source apk file to be zipaligned :param dist(str) - zipaligned apk file path to be created :param build_tool(str) - build tool version to be used by zipalign (default is 23.0.2) :param version(str) - zipalign version, default is 4 :param path(str) - basedir to run the command """ if build_tool is None: build_tool = config.build_tool_version android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME')) cmd_path = [ android_home, '/build-tools', '/%s' % build_tool, '/zipalign' ] cmd = [ ''.join(cmd_path), '-v', version, source, dist, ] common.run_cmd(cmd, log='zipalign.log', cwd=path)
python
def zipalign(source, dist, build_tool=None, version='4', path=None): """ Uses zipalign based on a provided build tool version (defaulit is 23.0.2). :param source(str) - source apk file to be zipaligned :param dist(str) - zipaligned apk file path to be created :param build_tool(str) - build tool version to be used by zipalign (default is 23.0.2) :param version(str) - zipalign version, default is 4 :param path(str) - basedir to run the command """ if build_tool is None: build_tool = config.build_tool_version android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME')) cmd_path = [ android_home, '/build-tools', '/%s' % build_tool, '/zipalign' ] cmd = [ ''.join(cmd_path), '-v', version, source, dist, ] common.run_cmd(cmd, log='zipalign.log', cwd=path)
[ "def", "zipalign", "(", "source", ",", "dist", ",", "build_tool", "=", "None", ",", "version", "=", "'4'", ",", "path", "=", "None", ")", ":", "if", "build_tool", "is", "None", ":", "build_tool", "=", "config", ".", "build_tool_version", "android_home", ...
Uses zipalign based on a provided build tool version (defaulit is 23.0.2). :param source(str) - source apk file to be zipaligned :param dist(str) - zipaligned apk file path to be created :param build_tool(str) - build tool version to be used by zipalign (default is 23.0.2) :param version(str) - zipalign version, default is 4 :param path(str) - basedir to run the command
[ "Uses", "zipalign", "based", "on", "a", "provided", "build", "tool", "version", "(", "defaulit", "is", "23", ".", "0", ".", "2", ")", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/helpers/android.py#L34-L60
aerogear/digger-build-cli
digger/helpers/android.py
get_default_keystore
def get_default_keystore(prefix='AG_'): """ Gets the default keystore information based on environment variables and a prefix. $PREFIX_KEYSTORE_PATH - keystore file path, default is opt/digger/debug.keystore $PREFIX_KEYSTORE_STOREPASS - keystore storepass, default is android $PREFIX_KEYSTORE_KEYPASS - keystore keypass, default is android $PREFIX_KEYSTORE_ALIAS - keystore alias, default is androiddebug :param prefix(str) - A prefix to be used for environment variables, default is AG_. Returns: A tuple containing the keystore information: (path, storepass, keypass, alias) """ path = os.environ.get('%s_KEYSTORE_PATH' % prefix, config.keystore.path) storepass = os.environ.get('%s_KEYSTORE_STOREPASS' % prefix, config.keystore.storepass) keypass = os.environ.get('%s_KEYSTORE_KEYPASS' % prefix, config.keystore.keypass) alias = os.environ.get('%s_KEYSTORE_ALIAS' % prefix, config.keystore.alias) return (path, storepass, keypass, alias)
python
def get_default_keystore(prefix='AG_'): """ Gets the default keystore information based on environment variables and a prefix. $PREFIX_KEYSTORE_PATH - keystore file path, default is opt/digger/debug.keystore $PREFIX_KEYSTORE_STOREPASS - keystore storepass, default is android $PREFIX_KEYSTORE_KEYPASS - keystore keypass, default is android $PREFIX_KEYSTORE_ALIAS - keystore alias, default is androiddebug :param prefix(str) - A prefix to be used for environment variables, default is AG_. Returns: A tuple containing the keystore information: (path, storepass, keypass, alias) """ path = os.environ.get('%s_KEYSTORE_PATH' % prefix, config.keystore.path) storepass = os.environ.get('%s_KEYSTORE_STOREPASS' % prefix, config.keystore.storepass) keypass = os.environ.get('%s_KEYSTORE_KEYPASS' % prefix, config.keystore.keypass) alias = os.environ.get('%s_KEYSTORE_ALIAS' % prefix, config.keystore.alias) return (path, storepass, keypass, alias)
[ "def", "get_default_keystore", "(", "prefix", "=", "'AG_'", ")", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'%s_KEYSTORE_PATH'", "%", "prefix", ",", "config", ".", "keystore", ".", "path", ")", "storepass", "=", "os", ".", "environ", ".", ...
Gets the default keystore information based on environment variables and a prefix. $PREFIX_KEYSTORE_PATH - keystore file path, default is opt/digger/debug.keystore $PREFIX_KEYSTORE_STOREPASS - keystore storepass, default is android $PREFIX_KEYSTORE_KEYPASS - keystore keypass, default is android $PREFIX_KEYSTORE_ALIAS - keystore alias, default is androiddebug :param prefix(str) - A prefix to be used for environment variables, default is AG_. Returns: A tuple containing the keystore information: (path, storepass, keypass, alias)
[ "Gets", "the", "default", "keystore", "information", "based", "on", "environment", "variables", "and", "a", "prefix", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/helpers/android.py#L63-L81
aerogear/digger-build-cli
digger/helpers/android.py
get_highest_build_tool
def get_highest_build_tool(sdk_version=None): """ Gets the highest build tool version based on major version sdk version. :param sdk_version(int) - sdk version to be used as the marjor build tool version context. Returns: A string containg the build tool version (default is 23.0.2 if none is found) """ if sdk_version is None: sdk_version = config.sdk_version android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME')) build_tool_folder = '%s/build-tools' % android_home folder_list = os.listdir(build_tool_folder) versions = [folder for folder in folder_list if folder.startswith('%s.' % sdk_version)] if len(versions) == 0: return config.build_tool_version return versions[::-1][0]
python
def get_highest_build_tool(sdk_version=None): """ Gets the highest build tool version based on major version sdk version. :param sdk_version(int) - sdk version to be used as the marjor build tool version context. Returns: A string containg the build tool version (default is 23.0.2 if none is found) """ if sdk_version is None: sdk_version = config.sdk_version android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME')) build_tool_folder = '%s/build-tools' % android_home folder_list = os.listdir(build_tool_folder) versions = [folder for folder in folder_list if folder.startswith('%s.' % sdk_version)] if len(versions) == 0: return config.build_tool_version return versions[::-1][0]
[ "def", "get_highest_build_tool", "(", "sdk_version", "=", "None", ")", ":", "if", "sdk_version", "is", "None", ":", "sdk_version", "=", "config", ".", "sdk_version", "android_home", "=", "os", ".", "environ", ".", "get", "(", "'AG_MOBILE_SDK'", ",", "os", "....
Gets the highest build tool version based on major version sdk version. :param sdk_version(int) - sdk version to be used as the marjor build tool version context. Returns: A string containg the build tool version (default is 23.0.2 if none is found)
[ "Gets", "the", "highest", "build", "tool", "version", "based", "on", "major", "version", "sdk", "version", "." ]
train
https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/helpers/android.py#L84-L101
hzdg/django-ecstatic
ecstatic/management/commands/hashmedianames.py
Command.rename_file
def rename_file(self, instance, field_name): """ Renames a file and updates the model field to point to the new file. Returns True if a change has been made; otherwise False """ file = getattr(instance, field_name) if file: new_name = get_hashed_filename(file.name, file) if new_name != file.name: print(' Renaming "%s" to "%s"' % (file.name, new_name)) file.save(os.path.basename(new_name), file, save=False) return True return False
python
def rename_file(self, instance, field_name): """ Renames a file and updates the model field to point to the new file. Returns True if a change has been made; otherwise False """ file = getattr(instance, field_name) if file: new_name = get_hashed_filename(file.name, file) if new_name != file.name: print(' Renaming "%s" to "%s"' % (file.name, new_name)) file.save(os.path.basename(new_name), file, save=False) return True return False
[ "def", "rename_file", "(", "self", ",", "instance", ",", "field_name", ")", ":", "file", "=", "getattr", "(", "instance", ",", "field_name", ")", "if", "file", ":", "new_name", "=", "get_hashed_filename", "(", "file", ".", "name", ",", "file", ")", "if",...
Renames a file and updates the model field to point to the new file. Returns True if a change has been made; otherwise False
[ "Renames", "a", "file", "and", "updates", "the", "model", "field", "to", "point", "to", "the", "new", "file", ".", "Returns", "True", "if", "a", "change", "has", "been", "made", ";", "otherwise", "False" ]
train
https://github.com/hzdg/django-ecstatic/blob/e2b9bd57ae19938449315457b31130c8df831911/ecstatic/management/commands/hashmedianames.py#L29-L44
Synerty/peek-plugin-base
peek_plugin_base/server/PeekPlatformServerHttpHookABC.py
PeekPlatformServerHttpHookABC.addServerResource
def addServerResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: """ Add Server Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None """ pluginSubPath = pluginSubPath.strip(b'/') self.__rootServerResource.putChild(pluginSubPath, resource)
python
def addServerResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: """ Add Server Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None """ pluginSubPath = pluginSubPath.strip(b'/') self.__rootServerResource.putChild(pluginSubPath, resource)
[ "def", "addServerResource", "(", "self", ",", "pluginSubPath", ":", "bytes", ",", "resource", ":", "BasicResource", ")", "->", "None", ":", "pluginSubPath", "=", "pluginSubPath", ".", "strip", "(", "b'/'", ")", "self", ".", "__rootServerResource", ".", "putChi...
Add Server Resource Add a cusotom implementation of a served http resource. :param pluginSubPath: The resource path where you want to serve this resource. :param resource: The resource to serve. :return: None
[ "Add", "Server", "Resource" ]
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PeekPlatformServerHttpHookABC.py#L30-L41
cohorte/cohorte-herald
python/run_http.py
main
def main(http_port, peer_name, node_name, app_id): """ Runs the framework :param http_port: HTTP port to listen to :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID """ # Create the framework framework = pelix.framework.create_framework( ('pelix.ipopo.core', 'pelix.ipopo.waiting', 'pelix.shell.core', 'pelix.shell.ipopo', 'pelix.shell.console', 'pelix.http.basic', # Herald core 'herald.core', 'herald.directory', 'herald.shell', # Herald HTTP 'herald.transports.http.directory', 'herald.transports.http.discovery_multicast', 'herald.transports.http.servlet', 'herald.transports.http.transport', # RPC 'pelix.remote.dispatcher', 'pelix.remote.registry', 'herald.remote.discovery', 'herald.remote.herald_xmlrpc',), {herald.FWPROP_NODE_UID: node_name, herald.FWPROP_NODE_NAME: node_name, herald.FWPROP_PEER_NAME: peer_name, herald.FWPROP_APPLICATION_ID: app_id}) # Start everything framework.start() context = framework.get_bundle_context() # Instantiate components with use_waiting_list(context) as ipopo: # ... HTTP server ipopo.add(pelix.http.FACTORY_HTTP_BASIC, "http-server", {pelix.http.HTTP_SERVICE_PORT: http_port}) # ... HTTP reception servlet ipopo.add(herald.transports.http.FACTORY_SERVLET, "herald-http-servlet") # ... HTTP multicast discovery ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST, "herald-http-discovery-multicast") # Start the framework and wait for it to stop framework.wait_for_stop()
python
def main(http_port, peer_name, node_name, app_id): """ Runs the framework :param http_port: HTTP port to listen to :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID """ # Create the framework framework = pelix.framework.create_framework( ('pelix.ipopo.core', 'pelix.ipopo.waiting', 'pelix.shell.core', 'pelix.shell.ipopo', 'pelix.shell.console', 'pelix.http.basic', # Herald core 'herald.core', 'herald.directory', 'herald.shell', # Herald HTTP 'herald.transports.http.directory', 'herald.transports.http.discovery_multicast', 'herald.transports.http.servlet', 'herald.transports.http.transport', # RPC 'pelix.remote.dispatcher', 'pelix.remote.registry', 'herald.remote.discovery', 'herald.remote.herald_xmlrpc',), {herald.FWPROP_NODE_UID: node_name, herald.FWPROP_NODE_NAME: node_name, herald.FWPROP_PEER_NAME: peer_name, herald.FWPROP_APPLICATION_ID: app_id}) # Start everything framework.start() context = framework.get_bundle_context() # Instantiate components with use_waiting_list(context) as ipopo: # ... HTTP server ipopo.add(pelix.http.FACTORY_HTTP_BASIC, "http-server", {pelix.http.HTTP_SERVICE_PORT: http_port}) # ... HTTP reception servlet ipopo.add(herald.transports.http.FACTORY_SERVLET, "herald-http-servlet") # ... HTTP multicast discovery ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST, "herald-http-discovery-multicast") # Start the framework and wait for it to stop framework.wait_for_stop()
[ "def", "main", "(", "http_port", ",", "peer_name", ",", "node_name", ",", "app_id", ")", ":", "# Create the framework", "framework", "=", "pelix", ".", "framework", ".", "create_framework", "(", "(", "'pelix.ipopo.core'", ",", "'pelix.ipopo.waiting'", ",", "'pelix...
Runs the framework :param http_port: HTTP port to listen to :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID
[ "Runs", "the", "framework" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/run_http.py#L50-L108
futursolo/magichttp
magichttp/writers.py
BaseHttpStreamWriter.write
def write(self, data: bytes) -> None: """ Write the data. """ if self.finished(): if self._exc: raise self._exc raise WriteAfterFinishedError if not data: return try: self._delegate.write_data(data, finished=False) except BaseWriteException as e: self._finished.set() if self._exc is None: self._exc = e raise
python
def write(self, data: bytes) -> None: """ Write the data. """ if self.finished(): if self._exc: raise self._exc raise WriteAfterFinishedError if not data: return try: self._delegate.write_data(data, finished=False) except BaseWriteException as e: self._finished.set() if self._exc is None: self._exc = e raise
[ "def", "write", "(", "self", ",", "data", ":", "bytes", ")", "->", "None", ":", "if", "self", ".", "finished", "(", ")", ":", "if", "self", ".", "_exc", ":", "raise", "self", ".", "_exc", "raise", "WriteAfterFinishedError", "if", "not", "data", ":", ...
Write the data.
[ "Write", "the", "data", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/writers.py#L89-L110
futursolo/magichttp
magichttp/writers.py
BaseHttpStreamWriter.flush
async def flush(self) -> None: """ Give the writer a chance to flush the pending data out of the internal buffer. """ async with self._flush_lock: if self.finished(): if self._exc: raise self._exc return try: await self._delegate.flush_buf() except asyncio.CancelledError: # pragma: no cover raise except BaseWriteException as e: self._finished.set() if self._exc is None: self._exc = e raise
python
async def flush(self) -> None: """ Give the writer a chance to flush the pending data out of the internal buffer. """ async with self._flush_lock: if self.finished(): if self._exc: raise self._exc return try: await self._delegate.flush_buf() except asyncio.CancelledError: # pragma: no cover raise except BaseWriteException as e: self._finished.set() if self._exc is None: self._exc = e raise
[ "async", "def", "flush", "(", "self", ")", "->", "None", ":", "async", "with", "self", ".", "_flush_lock", ":", "if", "self", ".", "finished", "(", ")", ":", "if", "self", ".", "_exc", ":", "raise", "self", ".", "_exc", "return", "try", ":", "await...
Give the writer a chance to flush the pending data out of the internal buffer.
[ "Give", "the", "writer", "a", "chance", "to", "flush", "the", "pending", "data", "out", "of", "the", "internal", "buffer", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/writers.py#L112-L135
futursolo/magichttp
magichttp/writers.py
BaseHttpStreamWriter.finish
def finish(self, data: bytes=b"") -> None: """ Finish the stream. """ if self.finished(): if self._exc: raise self._exc if data: raise WriteAfterFinishedError return try: self._delegate.write_data(data, finished=True) except BaseWriteException as e: if self._exc is None: self._exc = e raise finally: self._finished.set()
python
def finish(self, data: bytes=b"") -> None: """ Finish the stream. """ if self.finished(): if self._exc: raise self._exc if data: raise WriteAfterFinishedError return try: self._delegate.write_data(data, finished=True) except BaseWriteException as e: if self._exc is None: self._exc = e raise finally: self._finished.set()
[ "def", "finish", "(", "self", ",", "data", ":", "bytes", "=", "b\"\"", ")", "->", "None", ":", "if", "self", ".", "finished", "(", ")", ":", "if", "self", ".", "_exc", ":", "raise", "self", ".", "_exc", "if", "data", ":", "raise", "WriteAfterFinish...
Finish the stream.
[ "Finish", "the", "stream", "." ]
train
https://github.com/futursolo/magichttp/blob/84445d21d6829a43132da6d50a72501739d64ca4/magichttp/writers.py#L137-L160
clinicedc/edc-notification
edc_notification/decorators.py
register
def register(**kwargs): """Registers a notification_cls. """ def _wrapper(notification_cls): if not issubclass(notification_cls, (Notification,)): raise RegisterNotificationError( f"Wrapped class must be a 'Notification' class. " f"Got '{notification_cls.__name__}'" ) site_notifications.register(notification_cls=notification_cls) return notification_cls return _wrapper
python
def register(**kwargs): """Registers a notification_cls. """ def _wrapper(notification_cls): if not issubclass(notification_cls, (Notification,)): raise RegisterNotificationError( f"Wrapped class must be a 'Notification' class. " f"Got '{notification_cls.__name__}'" ) site_notifications.register(notification_cls=notification_cls) return notification_cls return _wrapper
[ "def", "register", "(", "*", "*", "kwargs", ")", ":", "def", "_wrapper", "(", "notification_cls", ")", ":", "if", "not", "issubclass", "(", "notification_cls", ",", "(", "Notification", ",", ")", ")", ":", "raise", "RegisterNotificationError", "(", "f\"Wrapp...
Registers a notification_cls.
[ "Registers", "a", "notification_cls", "." ]
train
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/decorators.py#L9-L23
anthonynguyen/pyrcon
pyrcon/rcon.py
RConnection.recvall
def recvall(self, timeout=0.5): """ Receive the RCON command response :param timeout: The timeout between consequent data receive :return str: The RCON command response with header stripped out """ response = '' self.socket.setblocking(False) start = time.time() while True: if response and time.time() - start > timeout: break elif time.time() - start > timeout * 2: break try: data = self.socket.recv(4096) if data: response += data.replace(self._rconreplystring, '') start = time.time() else: time.sleep(0.1) except socket.error: pass return response.strip()
python
def recvall(self, timeout=0.5): """ Receive the RCON command response :param timeout: The timeout between consequent data receive :return str: The RCON command response with header stripped out """ response = '' self.socket.setblocking(False) start = time.time() while True: if response and time.time() - start > timeout: break elif time.time() - start > timeout * 2: break try: data = self.socket.recv(4096) if data: response += data.replace(self._rconreplystring, '') start = time.time() else: time.sleep(0.1) except socket.error: pass return response.strip()
[ "def", "recvall", "(", "self", ",", "timeout", "=", "0.5", ")", ":", "response", "=", "''", "self", ".", "socket", ".", "setblocking", "(", "False", ")", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "if", "response", "and", "t...
Receive the RCON command response :param timeout: The timeout between consequent data receive :return str: The RCON command response with header stripped out
[ "Receive", "the", "RCON", "command", "response", ":", "param", "timeout", ":", "The", "timeout", "between", "consequent", "data", "receive", ":", "return", "str", ":", "The", "RCON", "command", "response", "with", "header", "stripped", "out" ]
train
https://github.com/anthonynguyen/pyrcon/blob/278cba95dd4d53a347d37acfce556ad375370e15/pyrcon/rcon.py#L97-L122
anthonynguyen/pyrcon
pyrcon/rcon.py
RConnection.send
def send(self, data): """ Send a RCON command over the socket :param data: The command to send :raise RconError: When it's not possible to evaluate the command :return str: The server response to the RCON command """ try: if not data: raise RconError('no command supplied') with self.lock: self.socket.send(self._rconsendstring.format(self.password, data)) except socket.error, e: raise RconError(e.message, e) else: timeout = self._timeout command = data.split(' ')[0] if command in self._long_commands_timeout: timeout = self._long_commands_timeout[command] return self.recvall(timeout=timeout)
python
def send(self, data): """ Send a RCON command over the socket :param data: The command to send :raise RconError: When it's not possible to evaluate the command :return str: The server response to the RCON command """ try: if not data: raise RconError('no command supplied') with self.lock: self.socket.send(self._rconsendstring.format(self.password, data)) except socket.error, e: raise RconError(e.message, e) else: timeout = self._timeout command = data.split(' ')[0] if command in self._long_commands_timeout: timeout = self._long_commands_timeout[command] return self.recvall(timeout=timeout)
[ "def", "send", "(", "self", ",", "data", ")", ":", "try", ":", "if", "not", "data", ":", "raise", "RconError", "(", "'no command supplied'", ")", "with", "self", ".", "lock", ":", "self", ".", "socket", ".", "send", "(", "self", ".", "_rconsendstring",...
Send a RCON command over the socket :param data: The command to send :raise RconError: When it's not possible to evaluate the command :return str: The server response to the RCON command
[ "Send", "a", "RCON", "command", "over", "the", "socket", ":", "param", "data", ":", "The", "command", "to", "send", ":", "raise", "RconError", ":", "When", "it", "s", "not", "possible", "to", "evaluate", "the", "command", ":", "return", "str", ":", "Th...
train
https://github.com/anthonynguyen/pyrcon/blob/278cba95dd4d53a347d37acfce556ad375370e15/pyrcon/rcon.py#L124-L143
mozilla/socorrolib
socorrolib/app/socorro_app.py
klass_to_pypath
def klass_to_pypath(klass): """when a class is defined within the module that is being executed as main, the module name will be specified as '__main__' even though the module actually had its own real name. This ends up being very confusing to Configman as it tries to refer to a class by its proper module name. This function will convert a class into its properly qualified actual pathname. This method is used when a Socorro app is actually invoked directly through the file in which the App class is defined. This allows configman to reimport the class under its proper name and treat it as if it had been run through the SocorroWelcomeApp. In turn, this allows the application defaults to be fetched from the properly imported class in time for configman use that information as value source.""" if klass.__module__ == '__main__': module_path = ( sys.modules['__main__'] .__file__[:-3] ) module_name = '' for a_python_path in sys.path: tentative_pathname = module_path.replace(a_python_path, '') if tentative_pathname != module_path: module_name = ( tentative_pathname.replace('/', '.').strip('.') ) break if module_name == '': return py_obj_to_str(klass) else: module_name = klass.__module__ return "%s.%s" % (module_name, klass.__name__)
python
def klass_to_pypath(klass): """when a class is defined within the module that is being executed as main, the module name will be specified as '__main__' even though the module actually had its own real name. This ends up being very confusing to Configman as it tries to refer to a class by its proper module name. This function will convert a class into its properly qualified actual pathname. This method is used when a Socorro app is actually invoked directly through the file in which the App class is defined. This allows configman to reimport the class under its proper name and treat it as if it had been run through the SocorroWelcomeApp. In turn, this allows the application defaults to be fetched from the properly imported class in time for configman use that information as value source.""" if klass.__module__ == '__main__': module_path = ( sys.modules['__main__'] .__file__[:-3] ) module_name = '' for a_python_path in sys.path: tentative_pathname = module_path.replace(a_python_path, '') if tentative_pathname != module_path: module_name = ( tentative_pathname.replace('/', '.').strip('.') ) break if module_name == '': return py_obj_to_str(klass) else: module_name = klass.__module__ return "%s.%s" % (module_name, klass.__name__)
[ "def", "klass_to_pypath", "(", "klass", ")", ":", "if", "klass", ".", "__module__", "==", "'__main__'", ":", "module_path", "=", "(", "sys", ".", "modules", "[", "'__main__'", "]", ".", "__file__", "[", ":", "-", "3", "]", ")", "module_name", "=", "''"...
when a class is defined within the module that is being executed as main, the module name will be specified as '__main__' even though the module actually had its own real name. This ends up being very confusing to Configman as it tries to refer to a class by its proper module name. This function will convert a class into its properly qualified actual pathname. This method is used when a Socorro app is actually invoked directly through the file in which the App class is defined. This allows configman to reimport the class under its proper name and treat it as if it had been run through the SocorroWelcomeApp. In turn, this allows the application defaults to be fetched from the properly imported class in time for configman use that information as value source.
[ "when", "a", "class", "is", "defined", "within", "the", "module", "that", "is", "being", "executed", "as", "main", "the", "module", "name", "will", "be", "specified", "as", "__main__", "even", "though", "the", "module", "actually", "had", "its", "own", "re...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/app/socorro_app.py#L105-L134
benley/butcher
butcher/address.py
Address.__parse_target
def __parse_target(targetstr, current_repo=None): """Parse a build target string. General form: //repo[gitref]/dir/path:target. These are all valid: //repo //repo[a038fi31d9e8bc11582ef1b1b1982d8fc] //repo[a039aa30853298]:foo //repo/dir //repo[a037928734]/dir //repo/dir/path //repo/dir/path:foo :foo dir/path dir/path:foo dir:foo Returns: {'repo': '//reponame', 'git_ref': 'a839a38fd...', 'path': 'dir/path', 'target': 'targetname} """ # 'blah' -> ':blah' if not (':' in targetstr or '/' in targetstr): targetstr = ':%s' % targetstr match = re.match( r'^(?://(?P<repo>[\w-]+)(?:\[(?P<git_ref>.*)\])?)?' r'(?:$|/?(?P<path>[\w/-]+)?(?::?(?P<target>[\w-]+)?))', targetstr) try: groups = match.groupdict() if not groups['repo']: groups['repo'] = current_repo if not groups['git_ref']: groups['git_ref'] = 'develop' if not groups['target']: groups['target'] = 'all' if not groups['path']: groups['path'] = '' except AttributeError: raise error.ButcherError('"%s" is not a valid build target.') #log.debug('parse_target: %s -> %s', targetstr, groups) return groups
python
def __parse_target(targetstr, current_repo=None): """Parse a build target string. General form: //repo[gitref]/dir/path:target. These are all valid: //repo //repo[a038fi31d9e8bc11582ef1b1b1982d8fc] //repo[a039aa30853298]:foo //repo/dir //repo[a037928734]/dir //repo/dir/path //repo/dir/path:foo :foo dir/path dir/path:foo dir:foo Returns: {'repo': '//reponame', 'git_ref': 'a839a38fd...', 'path': 'dir/path', 'target': 'targetname} """ # 'blah' -> ':blah' if not (':' in targetstr or '/' in targetstr): targetstr = ':%s' % targetstr match = re.match( r'^(?://(?P<repo>[\w-]+)(?:\[(?P<git_ref>.*)\])?)?' r'(?:$|/?(?P<path>[\w/-]+)?(?::?(?P<target>[\w-]+)?))', targetstr) try: groups = match.groupdict() if not groups['repo']: groups['repo'] = current_repo if not groups['git_ref']: groups['git_ref'] = 'develop' if not groups['target']: groups['target'] = 'all' if not groups['path']: groups['path'] = '' except AttributeError: raise error.ButcherError('"%s" is not a valid build target.') #log.debug('parse_target: %s -> %s', targetstr, groups) return groups
[ "def", "__parse_target", "(", "targetstr", ",", "current_repo", "=", "None", ")", ":", "# 'blah' -> ':blah'", "if", "not", "(", "':'", "in", "targetstr", "or", "'/'", "in", "targetstr", ")", ":", "targetstr", "=", "':%s'", "%", "targetstr", "match", "=", "...
Parse a build target string. General form: //repo[gitref]/dir/path:target. These are all valid: //repo //repo[a038fi31d9e8bc11582ef1b1b1982d8fc] //repo[a039aa30853298]:foo //repo/dir //repo[a037928734]/dir //repo/dir/path //repo/dir/path:foo :foo dir/path dir/path:foo dir:foo Returns: {'repo': '//reponame', 'git_ref': 'a839a38fd...', 'path': 'dir/path', 'target': 'targetname}
[ "Parse", "a", "build", "target", "string", "." ]
train
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/address.py#L115-L158
jbrudvik/yahooscraper
yahooscraper/login.py
authenticated_session
def authenticated_session(username, password): """ Given username and password, return an authenticated Yahoo `requests` session that can be used for further scraping requests. Throw an AuthencationError if authentication fails. """ session = requests.Session() session.headers.update(headers()) response = session.get(url()) login_path = path(response.text) login_url = urljoin(response.url, login_path) login_post_data = post_data(response.text, username, password) response = session.post(login_url, data=login_post_data) if response.headers['connection'] == 'close': raise Exception('Authencation failed') return session
python
def authenticated_session(username, password): """ Given username and password, return an authenticated Yahoo `requests` session that can be used for further scraping requests. Throw an AuthencationError if authentication fails. """ session = requests.Session() session.headers.update(headers()) response = session.get(url()) login_path = path(response.text) login_url = urljoin(response.url, login_path) login_post_data = post_data(response.text, username, password) response = session.post(login_url, data=login_post_data) if response.headers['connection'] == 'close': raise Exception('Authencation failed') return session
[ "def", "authenticated_session", "(", "username", ",", "password", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "headers", ".", "update", "(", "headers", "(", ")", ")", "response", "=", "session", ".", "get", "(", "url",...
Given username and password, return an authenticated Yahoo `requests` session that can be used for further scraping requests. Throw an AuthencationError if authentication fails.
[ "Given", "username", "and", "password", "return", "an", "authenticated", "Yahoo", "requests", "session", "that", "can", "be", "used", "for", "further", "scraping", "requests", "." ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/login.py#L18-L37
jbrudvik/yahooscraper
yahooscraper/login.py
post_data
def post_data(page, username, password): """ Given username and password, return the post data necessary for login """ soup = BeautifulSoup(page) try: inputs = soup.find(id='hiddens').findAll('input') post_data = {input['name']: input['value'] for input in inputs} post_data['username'] = username post_data['passwd'] = password return post_data except: return None
python
def post_data(page, username, password): """ Given username and password, return the post data necessary for login """ soup = BeautifulSoup(page) try: inputs = soup.find(id='hiddens').findAll('input') post_data = {input['name']: input['value'] for input in inputs} post_data['username'] = username post_data['passwd'] = password return post_data except: return None
[ "def", "post_data", "(", "page", ",", "username", ",", "password", ")", ":", "soup", "=", "BeautifulSoup", "(", "page", ")", "try", ":", "inputs", "=", "soup", ".", "find", "(", "id", "=", "'hiddens'", ")", ".", "findAll", "(", "'input'", ")", "post_...
Given username and password, return the post data necessary for login
[ "Given", "username", "and", "password", "return", "the", "post", "data", "necessary", "for", "login" ]
train
https://github.com/jbrudvik/yahooscraper/blob/e880323fea0dd25f03410eea9d088760ba7c3528/yahooscraper/login.py#L67-L79
duniter/duniter-python-api
duniterpy/documents/certification.py
Certification.from_signed_raw
def from_signed_raw(cls: Type[CertificationType], signed_raw: str) -> CertificationType: """ Return Certification instance from signed raw document :param signed_raw: Signed raw document :return: """ n = 0 lines = signed_raw.splitlines(True) version = int(Identity.parse_field("Version", lines[n])) n += 1 Certification.parse_field("Type", lines[n]) n += 1 currency = Certification.parse_field("Currency", lines[n]) n += 1 pubkey_from = Certification.parse_field("Issuer", lines[n]) n += 1 identity_pubkey = Certification.parse_field("IdtyIssuer", lines[n]) n += 1 identity_uid = Certification.parse_field("IdtyUniqueID", lines[n]) n += 1 identity_timestamp = BlockUID.from_str(Certification.parse_field("IdtyTimestamp", lines[n])) n += 1 identity_signature = Certification.parse_field("IdtySignature", lines[n]) n += 1 timestamp = BlockUID.from_str(Certification.parse_field("CertTimestamp", lines[n])) n += 1 signature = Certification.parse_field("Signature", lines[n]) identity = Identity(version, currency, identity_pubkey, identity_uid, identity_timestamp, identity_signature) return cls(version, currency, pubkey_from, identity, timestamp, signature)
python
def from_signed_raw(cls: Type[CertificationType], signed_raw: str) -> CertificationType: """ Return Certification instance from signed raw document :param signed_raw: Signed raw document :return: """ n = 0 lines = signed_raw.splitlines(True) version = int(Identity.parse_field("Version", lines[n])) n += 1 Certification.parse_field("Type", lines[n]) n += 1 currency = Certification.parse_field("Currency", lines[n]) n += 1 pubkey_from = Certification.parse_field("Issuer", lines[n]) n += 1 identity_pubkey = Certification.parse_field("IdtyIssuer", lines[n]) n += 1 identity_uid = Certification.parse_field("IdtyUniqueID", lines[n]) n += 1 identity_timestamp = BlockUID.from_str(Certification.parse_field("IdtyTimestamp", lines[n])) n += 1 identity_signature = Certification.parse_field("IdtySignature", lines[n]) n += 1 timestamp = BlockUID.from_str(Certification.parse_field("CertTimestamp", lines[n])) n += 1 signature = Certification.parse_field("Signature", lines[n]) identity = Identity(version, currency, identity_pubkey, identity_uid, identity_timestamp, identity_signature) return cls(version, currency, pubkey_from, identity, timestamp, signature)
[ "def", "from_signed_raw", "(", "cls", ":", "Type", "[", "CertificationType", "]", ",", "signed_raw", ":", "str", ")", "->", "CertificationType", ":", "n", "=", "0", "lines", "=", "signed_raw", ".", "splitlines", "(", "True", ")", "version", "=", "int", "...
Return Certification instance from signed raw document :param signed_raw: Signed raw document :return:
[ "Return", "Certification", "instance", "from", "signed", "raw", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/certification.py#L65-L106
duniter/duniter-python-api
duniterpy/documents/certification.py
Certification.from_inline
def from_inline(cls: Type[CertificationType], version: int, currency: str, blockhash: Optional[str], inline: str) -> CertificationType: """ Return Certification instance from inline document Only self.pubkey_to is populated. You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods :param version: Version of document :param currency: Name of the currency :param blockhash: Hash of the block :param inline: Inline document :return: """ cert_data = Certification.re_inline.match(inline) if cert_data is None: raise MalformedDocumentError("Certification ({0})".format(inline)) pubkey_from = cert_data.group(1) pubkey_to = cert_data.group(2) blockid = int(cert_data.group(3)) if blockid == 0 or blockhash is None: timestamp = BlockUID.empty() else: timestamp = BlockUID(blockid, blockhash) signature = cert_data.group(4) return cls(version, currency, pubkey_from, pubkey_to, timestamp, signature)
python
def from_inline(cls: Type[CertificationType], version: int, currency: str, blockhash: Optional[str], inline: str) -> CertificationType: """ Return Certification instance from inline document Only self.pubkey_to is populated. You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods :param version: Version of document :param currency: Name of the currency :param blockhash: Hash of the block :param inline: Inline document :return: """ cert_data = Certification.re_inline.match(inline) if cert_data is None: raise MalformedDocumentError("Certification ({0})".format(inline)) pubkey_from = cert_data.group(1) pubkey_to = cert_data.group(2) blockid = int(cert_data.group(3)) if blockid == 0 or blockhash is None: timestamp = BlockUID.empty() else: timestamp = BlockUID(blockid, blockhash) signature = cert_data.group(4) return cls(version, currency, pubkey_from, pubkey_to, timestamp, signature)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "CertificationType", "]", ",", "version", ":", "int", ",", "currency", ":", "str", ",", "blockhash", ":", "Optional", "[", "str", "]", ",", "inline", ":", "str", ")", "->", "CertificationType", ":", "...
Return Certification instance from inline document Only self.pubkey_to is populated. You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods :param version: Version of document :param currency: Name of the currency :param blockhash: Hash of the block :param inline: Inline document :return:
[ "Return", "Certification", "instance", "from", "inline", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/certification.py#L109-L135
duniter/duniter-python-api
duniterpy/documents/certification.py
Certification.raw
def raw(self) -> str: """ Return a raw document of the certification """ if not isinstance(self.identity, Identity): raise MalformedDocumentError("Can not return full certification document created from inline") return """Version: {version} Type: Certification Currency: {currency} Issuer: {issuer} IdtyIssuer: {certified_pubkey} IdtyUniqueID: {certified_uid} IdtyTimestamp: {certified_ts} IdtySignature: {certified_signature} CertTimestamp: {timestamp} """.format(version=self.version, currency=self.currency, issuer=self.pubkey_from, certified_pubkey=self.identity.pubkey, certified_uid=self.identity.uid, certified_ts=self.identity.timestamp, certified_signature=self.identity.signatures[0], timestamp=self.timestamp)
python
def raw(self) -> str: """ Return a raw document of the certification """ if not isinstance(self.identity, Identity): raise MalformedDocumentError("Can not return full certification document created from inline") return """Version: {version} Type: Certification Currency: {currency} Issuer: {issuer} IdtyIssuer: {certified_pubkey} IdtyUniqueID: {certified_uid} IdtyTimestamp: {certified_ts} IdtySignature: {certified_signature} CertTimestamp: {timestamp} """.format(version=self.version, currency=self.currency, issuer=self.pubkey_from, certified_pubkey=self.identity.pubkey, certified_uid=self.identity.uid, certified_ts=self.identity.timestamp, certified_signature=self.identity.signatures[0], timestamp=self.timestamp)
[ "def", "raw", "(", "self", ")", "->", "str", ":", "if", "not", "isinstance", "(", "self", ".", "identity", ",", "Identity", ")", ":", "raise", "MalformedDocumentError", "(", "\"Can not return full certification document created from inline\"", ")", "return", "\"\"\"...
Return a raw document of the certification
[ "Return", "a", "raw", "document", "of", "the", "certification" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/certification.py#L137-L160
duniter/duniter-python-api
duniterpy/documents/certification.py
Certification.inline
def inline(self) -> str: """ Return inline document string :return: """ return "{0}:{1}:{2}:{3}".format(self.pubkey_from, self.pubkey_to, self.timestamp.number, self.signatures[0])
python
def inline(self) -> str: """ Return inline document string :return: """ return "{0}:{1}:{2}:{3}".format(self.pubkey_from, self.pubkey_to, self.timestamp.number, self.signatures[0])
[ "def", "inline", "(", "self", ")", "->", "str", ":", "return", "\"{0}:{1}:{2}:{3}\"", ".", "format", "(", "self", ".", "pubkey_from", ",", "self", ".", "pubkey_to", ",", "self", ".", "timestamp", ".", "number", ",", "self", ".", "signatures", "[", "0", ...
Return inline document string :return:
[ "Return", "inline", "document", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/certification.py#L193-L200
ShawnClake/Apitax
apitax/ah/api/controllers/migrations/users_controller.py
command
def command(execute=None): # noqa: E501 """Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response """ if connexion.request.is_json: execute = Execute.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
python
def command(execute=None): # noqa: E501 """Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response """ if connexion.request.is_json: execute = Execute.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
[ "def", "command", "(", "execute", "=", "None", ")", ":", "# noqa: E501", "if", "connexion", ".", "request", ".", "is_json", ":", "execute", "=", "Execute", ".", "from_dict", "(", "connexion", ".", "request", ".", "get_json", "(", ")", ")", "# noqa: E501", ...
Execute a Command Execute a command # noqa: E501 :param execute: The data needed to execute this command :type execute: dict | bytes :rtype: Response
[ "Execute", "a", "Command" ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/migrations/users_controller.py#L11-L23
ShawnClake/Apitax
apitax/ah/api/controllers/migrations/users_controller.py
endpoint_catalog
def endpoint_catalog(catalog=None): # noqa: E501 """Retrieve the endpoint catalog Retrieve the endpoint catalog # noqa: E501 :param catalog: The data needed to get a catalog :type catalog: dict | bytes :rtype: Response """ if connexion.request.is_json: catalog = UserAuth.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
python
def endpoint_catalog(catalog=None): # noqa: E501 """Retrieve the endpoint catalog Retrieve the endpoint catalog # noqa: E501 :param catalog: The data needed to get a catalog :type catalog: dict | bytes :rtype: Response """ if connexion.request.is_json: catalog = UserAuth.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
[ "def", "endpoint_catalog", "(", "catalog", "=", "None", ")", ":", "# noqa: E501", "if", "connexion", ".", "request", ".", "is_json", ":", "catalog", "=", "UserAuth", ".", "from_dict", "(", "connexion", ".", "request", ".", "get_json", "(", ")", ")", "# noq...
Retrieve the endpoint catalog Retrieve the endpoint catalog # noqa: E501 :param catalog: The data needed to get a catalog :type catalog: dict | bytes :rtype: Response
[ "Retrieve", "the", "endpoint", "catalog" ]
train
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/migrations/users_controller.py#L37-L49
chrlie/frogsay
src/frogsay/client.py
Client.frog_tip
def frog_tip(self): """\ Return a single FROG tip. """ cache = self._cache client = self._client if self.should_refresh: tips = client.croak() for number, tip in tips.items(): cache[str(number)] = tip choice = random.choice(list(cache.keys())) # We'll get a bytes() object here during real usage # but a text-like object in the tests. Good job Python try: tip = cache[choice].decode() except AttributeError: tip = cache[choice] del cache[choice] return tip
python
def frog_tip(self): """\ Return a single FROG tip. """ cache = self._cache client = self._client if self.should_refresh: tips = client.croak() for number, tip in tips.items(): cache[str(number)] = tip choice = random.choice(list(cache.keys())) # We'll get a bytes() object here during real usage # but a text-like object in the tests. Good job Python try: tip = cache[choice].decode() except AttributeError: tip = cache[choice] del cache[choice] return tip
[ "def", "frog_tip", "(", "self", ")", ":", "cache", "=", "self", ".", "_cache", "client", "=", "self", ".", "_client", "if", "self", ".", "should_refresh", ":", "tips", "=", "client", ".", "croak", "(", ")", "for", "number", ",", "tip", "in", "tips", ...
\ Return a single FROG tip.
[ "\\", "Return", "a", "single", "FROG", "tip", "." ]
train
https://github.com/chrlie/frogsay/blob/1c21e1401dc24719732218af830d34b842ab10b9/src/frogsay/client.py#L55-L78
hangyan/shaw
shaw/django/queryset.py
queryset_iterator
def queryset_iterator(queryset, chunksize=1000): ''''' Iterate over a Django Queryset ordered by the primary key This method loads a maximum of chunksize (default: 1000) rows in it's memory at the same time while django normally would load all rows in it's memory. Using the iterator() method only causes it to not preload all the classes. Note that the implementation of the iterator does not support ordered query sets. Link: https://djangosnippets.org/snippets/1949/ ''' pk = 0 last_pk = queryset.order_by('-pk').values_list('pk', flat=True).first() if last_pk is not None: queryset = queryset.order_by('pk') while pk < last_pk: for row in queryset.filter(pk__gt=pk)[:chunksize]: pk = row.pk yield row gc.collect()
python
def queryset_iterator(queryset, chunksize=1000): ''''' Iterate over a Django Queryset ordered by the primary key This method loads a maximum of chunksize (default: 1000) rows in it's memory at the same time while django normally would load all rows in it's memory. Using the iterator() method only causes it to not preload all the classes. Note that the implementation of the iterator does not support ordered query sets. Link: https://djangosnippets.org/snippets/1949/ ''' pk = 0 last_pk = queryset.order_by('-pk').values_list('pk', flat=True).first() if last_pk is not None: queryset = queryset.order_by('pk') while pk < last_pk: for row in queryset.filter(pk__gt=pk)[:chunksize]: pk = row.pk yield row gc.collect()
[ "def", "queryset_iterator", "(", "queryset", ",", "chunksize", "=", "1000", ")", ":", "pk", "=", "0", "last_pk", "=", "queryset", ".", "order_by", "(", "'-pk'", ")", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ".", "first", "(", "...
Iterate over a Django Queryset ordered by the primary key This method loads a maximum of chunksize (default: 1000) rows in it's memory at the same time while django normally would load all rows in it's memory. Using the iterator() method only causes it to not preload all the classes. Note that the implementation of the iterator does not support ordered query sets. Link: https://djangosnippets.org/snippets/1949/
[ "Iterate", "over", "a", "Django", "Queryset", "ordered", "by", "the", "primary", "key" ]
train
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/django/queryset.py#L9-L32
marteinn/AtomicPress
atomicpress/utils/ftpsync/ftpsync.py
FtpSync.remove_dir
def remove_dir(self, destination_path): """ Remove folder. Based on https://gist.github.com/artlogic/2632647. """ wd = self.conn.pwd() try: names = self.conn.nlst(destination_path) except ftplib.all_errors as e: # some FTP servers complain when you try and list non-existent paths logger.debug('FtpRmTree: Could not remove {0}: {1}'.format( destination_path, e)) return for name in names: if os.path.split(name)[1] in ('.', '..'): continue try: self.conn.cwd(name) # if we can cwd to it, it's a folder self.conn.cwd(wd) # don't try a nuke a folder we're in self.remove_dir(name) except ftplib.all_errors: self.conn.delete(name) try: self.conn.rmd(destination_path) except ftplib.all_errors as e: logger.debug('remove_dir: Could not remove {0}: {1}'.format( destination_path, e))
python
def remove_dir(self, destination_path): """ Remove folder. Based on https://gist.github.com/artlogic/2632647. """ wd = self.conn.pwd() try: names = self.conn.nlst(destination_path) except ftplib.all_errors as e: # some FTP servers complain when you try and list non-existent paths logger.debug('FtpRmTree: Could not remove {0}: {1}'.format( destination_path, e)) return for name in names: if os.path.split(name)[1] in ('.', '..'): continue try: self.conn.cwd(name) # if we can cwd to it, it's a folder self.conn.cwd(wd) # don't try a nuke a folder we're in self.remove_dir(name) except ftplib.all_errors: self.conn.delete(name) try: self.conn.rmd(destination_path) except ftplib.all_errors as e: logger.debug('remove_dir: Could not remove {0}: {1}'.format( destination_path, e))
[ "def", "remove_dir", "(", "self", ",", "destination_path", ")", ":", "wd", "=", "self", ".", "conn", ".", "pwd", "(", ")", "try", ":", "names", "=", "self", ".", "conn", ".", "nlst", "(", "destination_path", ")", "except", "ftplib", ".", "all_errors", ...
Remove folder. Based on https://gist.github.com/artlogic/2632647.
[ "Remove", "folder", ".", "Based", "on", "https", ":", "//", "gist", ".", "github", ".", "com", "/", "artlogic", "/", "2632647", "." ]
train
https://github.com/marteinn/AtomicPress/blob/b8a0ca9c9c327f062833fc4a401a8ac0baccf6d1/atomicpress/utils/ftpsync/ftpsync.py#L29-L59
etcher-be/epab
epab/__main__.py
cli
def cli(dirty, stash): """ This is a tool that handles all the tasks to build a Python application This tool is installed as a setuptools entry point, which means it should be accessible from your terminal once this application is installed in develop mode. """ _setup_logging() LOGGER.info('EPAB %s', __version__) LOGGER.info('Running in %s', os.getcwd()) CTX.repo = epab.utils.Repo() CTX.repo.ensure() CTX.stash = stash for filename in _GIT_IGNORE: epab.utils.add_to_gitignore(filename) if not dirty and CTX.repo.is_dirty(): LOGGER.error('Repository is dirty') sys.exit(-1)
python
def cli(dirty, stash): """ This is a tool that handles all the tasks to build a Python application This tool is installed as a setuptools entry point, which means it should be accessible from your terminal once this application is installed in develop mode. """ _setup_logging() LOGGER.info('EPAB %s', __version__) LOGGER.info('Running in %s', os.getcwd()) CTX.repo = epab.utils.Repo() CTX.repo.ensure() CTX.stash = stash for filename in _GIT_IGNORE: epab.utils.add_to_gitignore(filename) if not dirty and CTX.repo.is_dirty(): LOGGER.error('Repository is dirty') sys.exit(-1)
[ "def", "cli", "(", "dirty", ",", "stash", ")", ":", "_setup_logging", "(", ")", "LOGGER", ".", "info", "(", "'EPAB %s'", ",", "__version__", ")", "LOGGER", ".", "info", "(", "'Running in %s'", ",", "os", ".", "getcwd", "(", ")", ")", "CTX", ".", "rep...
This is a tool that handles all the tasks to build a Python application This tool is installed as a setuptools entry point, which means it should be accessible from your terminal once this application is installed in develop mode.
[ "This", "is", "a", "tool", "that", "handles", "all", "the", "tasks", "to", "build", "a", "Python", "application" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/__main__.py#L53-L72
clusterpoint/python-client-api
pycps/response.py
_handle_response
def _handle_response(response, command, id_xpath='./id', **kwargs): """ Initialize the corect Response object from the response string based on the API command type. """ _response_switch = { 'insert': ModifyResponse, 'replace': ModifyResponse, 'partial-replace': ModifyResponse, 'update': ModifyResponse, 'delete': ModifyResponse, 'search-delete': SearchDeleteResponse, 'reindex': Response, 'backup': Response, 'restore': Response, 'clear': Response, 'status': StatusResponse, 'search': SearchResponse, 'retrieve': ListResponse, 'similar': ListResponse, 'lookup': LookupResponse, 'alternatives': AlternativesResponse, 'list-words': WordsResponse, 'list-first': ListResponse, 'list-last': ListResponse, 'retrieve-last': ListResponse, 'retrieve-first': ListResponse, 'show-history': None, 'list-paths': ListPathsResponse, 'list-facets': ListFacetsResponse} try: request_class = _response_switch[command] except KeyError: request_class = Response return request_class(response, id_xpath, **kwargs)
python
def _handle_response(response, command, id_xpath='./id', **kwargs): """ Initialize the corect Response object from the response string based on the API command type. """ _response_switch = { 'insert': ModifyResponse, 'replace': ModifyResponse, 'partial-replace': ModifyResponse, 'update': ModifyResponse, 'delete': ModifyResponse, 'search-delete': SearchDeleteResponse, 'reindex': Response, 'backup': Response, 'restore': Response, 'clear': Response, 'status': StatusResponse, 'search': SearchResponse, 'retrieve': ListResponse, 'similar': ListResponse, 'lookup': LookupResponse, 'alternatives': AlternativesResponse, 'list-words': WordsResponse, 'list-first': ListResponse, 'list-last': ListResponse, 'retrieve-last': ListResponse, 'retrieve-first': ListResponse, 'show-history': None, 'list-paths': ListPathsResponse, 'list-facets': ListFacetsResponse} try: request_class = _response_switch[command] except KeyError: request_class = Response return request_class(response, id_xpath, **kwargs)
[ "def", "_handle_response", "(", "response", ",", "command", ",", "id_xpath", "=", "'./id'", ",", "*", "*", "kwargs", ")", ":", "_response_switch", "=", "{", "'insert'", ":", "ModifyResponse", ",", "'replace'", ":", "ModifyResponse", ",", "'partial-replace'", "...
Initialize the corect Response object from the response string based on the API command type.
[ "Initialize", "the", "corect", "Response", "object", "from", "the", "response", "string", "based", "on", "the", "API", "command", "type", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L44-L75
clusterpoint/python-client-api
pycps/response.py
Response._parse_for_errors
def _parse_for_errors(self): """ Look for an error tag and raise APIError for fatal errors or APIWarning for nonfatal ones. """ error = self._response.find('{www.clusterpoint.com}error') if error is not None: if error.find('level').text.lower() in ('rejected', 'failed', 'error', 'fatal'): raise APIError(error) else: warnings.warn(APIWarning(error))
python
def _parse_for_errors(self): """ Look for an error tag and raise APIError for fatal errors or APIWarning for nonfatal ones. """ error = self._response.find('{www.clusterpoint.com}error') if error is not None: if error.find('level').text.lower() in ('rejected', 'failed', 'error', 'fatal'): raise APIError(error) else: warnings.warn(APIWarning(error))
[ "def", "_parse_for_errors", "(", "self", ")", ":", "error", "=", "self", ".", "_response", ".", "find", "(", "'{www.clusterpoint.com}error'", ")", "if", "error", "is", "not", "None", ":", "if", "error", ".", "find", "(", "'level'", ")", ".", "text", ".",...
Look for an error tag and raise APIError for fatal errors or APIWarning for nonfatal ones.
[ "Look", "for", "an", "error", "tag", "and", "raise", "APIError", "for", "fatal", "errors", "or", "APIWarning", "for", "nonfatal", "ones", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L112-L119
clusterpoint/python-client-api
pycps/response.py
Response.get_content_string
def get_content_string(self): """ Ge thet Clusterpoint response's content as a string. """ return ''.join([ET.tostring(element, encoding="utf-8", method="xml") for element in list(self._content)])
python
def get_content_string(self): """ Ge thet Clusterpoint response's content as a string. """ return ''.join([ET.tostring(element, encoding="utf-8", method="xml") for element in list(self._content)])
[ "def", "get_content_string", "(", "self", ")", ":", "return", "''", ".", "join", "(", "[", "ET", ".", "tostring", "(", "element", ",", "encoding", "=", "\"utf-8\"", ",", "method", "=", "\"xml\"", ")", "for", "element", "in", "list", "(", "self", ".", ...
Ge thet Clusterpoint response's content as a string.
[ "Ge", "thet", "Clusterpoint", "response", "s", "content", "as", "a", "string", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L129-L132
clusterpoint/python-client-api
pycps/response.py
Response.get_content_field
def get_content_field(self, name): """ Get the contents of a specific subtag from Clusterpoint Storage's response's content tag. Args: name -- A name string of the content's subtag to be returned. Returns: A dict representing the contents of the specified field or a list of dicts if there are multiple fields with that tag name. Returns None if no field found. """ fields = self._content.findall(name) if not fields: return None elif len(fields) == 1: return etree_to_dict(fields[0])[name] else: return [etree_to_dict(field)[name] for field in fields]
python
def get_content_field(self, name): """ Get the contents of a specific subtag from Clusterpoint Storage's response's content tag. Args: name -- A name string of the content's subtag to be returned. Returns: A dict representing the contents of the specified field or a list of dicts if there are multiple fields with that tag name. Returns None if no field found. """ fields = self._content.findall(name) if not fields: return None elif len(fields) == 1: return etree_to_dict(fields[0])[name] else: return [etree_to_dict(field)[name] for field in fields]
[ "def", "get_content_field", "(", "self", ",", "name", ")", ":", "fields", "=", "self", ".", "_content", ".", "findall", "(", "name", ")", "if", "not", "fields", ":", "return", "None", "elif", "len", "(", "fields", ")", "==", "1", ":", "return", "etre...
Get the contents of a specific subtag from Clusterpoint Storage's response's content tag. Args: name -- A name string of the content's subtag to be returned. Returns: A dict representing the contents of the specified field or a list of dicts if there are multiple fields with that tag name. Returns None if no field found.
[ "Get", "the", "contents", "of", "a", "specific", "subtag", "from", "Clusterpoint", "Storage", "s", "response", "s", "content", "tag", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L134-L150
clusterpoint/python-client-api
pycps/response.py
ListResponse.get_documents
def get_documents(self, doc_format='dict'): """ Get the documents returned from Storege in this response. Keyword args: doc_format -- Specifies the doc_format for the returned documents. Can be 'dict', 'etree' or 'string'. Default is 'dict'. Returns: A dict where keys are document ids and values depending of the required doc_format: A dict representations of documents (see etree_to_dict()); A etree Element representing the document; A raw XML document string. Raises: ParameterError -- The doc_format value is not allowed. """ def get_doc_id(root, rel_path): if not rel_path: return root.text else: child = root.find(rel_path[0]) if child is None: return None return get_doc_id(child, rel_path[1:]) if doc_format == 'dict': return dict([(get_doc_id(document, self._id_xpath), etree_to_dict(document)['document']) for document in self._get_doc_list()]) elif doc_format == 'etree': return dict([(get_doc_id(document, self._id_xpath), document) for document in self._get_doc_list()]) elif doc_format == 'list-etree': return self._get_doc_list() elif doc_format == 'list-string': return list([(ET.tostring(document)) for document in self._get_doc_list()]) elif doc_format in ('', None, 'string'): return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document)) for document in self._get_doc_list()]) else: raise ParameterError("doc_format=" + doc_format)
python
def get_documents(self, doc_format='dict'): """ Get the documents returned from Storege in this response. Keyword args: doc_format -- Specifies the doc_format for the returned documents. Can be 'dict', 'etree' or 'string'. Default is 'dict'. Returns: A dict where keys are document ids and values depending of the required doc_format: A dict representations of documents (see etree_to_dict()); A etree Element representing the document; A raw XML document string. Raises: ParameterError -- The doc_format value is not allowed. """ def get_doc_id(root, rel_path): if not rel_path: return root.text else: child = root.find(rel_path[0]) if child is None: return None return get_doc_id(child, rel_path[1:]) if doc_format == 'dict': return dict([(get_doc_id(document, self._id_xpath), etree_to_dict(document)['document']) for document in self._get_doc_list()]) elif doc_format == 'etree': return dict([(get_doc_id(document, self._id_xpath), document) for document in self._get_doc_list()]) elif doc_format == 'list-etree': return self._get_doc_list() elif doc_format == 'list-string': return list([(ET.tostring(document)) for document in self._get_doc_list()]) elif doc_format in ('', None, 'string'): return dict([(get_doc_id(document, self._id_xpath), ET.tostring(document)) for document in self._get_doc_list()]) else: raise ParameterError("doc_format=" + doc_format)
[ "def", "get_documents", "(", "self", ",", "doc_format", "=", "'dict'", ")", ":", "def", "get_doc_id", "(", "root", ",", "rel_path", ")", ":", "if", "not", "rel_path", ":", "return", "root", ".", "text", "else", ":", "child", "=", "root", ".", "find", ...
Get the documents returned from Storege in this response. Keyword args: doc_format -- Specifies the doc_format for the returned documents. Can be 'dict', 'etree' or 'string'. Default is 'dict'. Returns: A dict where keys are document ids and values depending of the required doc_format: A dict representations of documents (see etree_to_dict()); A etree Element representing the document; A raw XML document string. Raises: ParameterError -- The doc_format value is not allowed.
[ "Get", "the", "documents", "returned", "from", "Storege", "in", "this", "response", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L226-L266
clusterpoint/python-client-api
pycps/response.py
SearchResponse.get_aggregate
def get_aggregate(self): """ Get aggregate data. Returns: A dict in with queries as keys and results as values. """ return dict([(aggregate.find('query').text, [(ET.tostring(data).lstrip('<data xmlns:cps="www.clusterpoint.com" xmlns:cpse="www.clusterpoint.com">').strip().rstrip("</data>")) for data in aggregate.findall('data')]) for aggregate in self._content.findall('aggregate')])
python
def get_aggregate(self): """ Get aggregate data. Returns: A dict in with queries as keys and results as values. """ return dict([(aggregate.find('query').text, [(ET.tostring(data).lstrip('<data xmlns:cps="www.clusterpoint.com" xmlns:cpse="www.clusterpoint.com">').strip().rstrip("</data>")) for data in aggregate.findall('data')]) for aggregate in self._content.findall('aggregate')])
[ "def", "get_aggregate", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "aggregate", ".", "find", "(", "'query'", ")", ".", "text", ",", "[", "(", "ET", ".", "tostring", "(", "data", ")", ".", "lstrip", "(", "'<data xmlns:cps=\"www.clusterpoint.c...
Get aggregate data. Returns: A dict in with queries as keys and results as values.
[ "Get", "aggregate", "data", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L312-L319
clusterpoint/python-client-api
pycps/response.py
WordsResponse.get_words
def get_words(self): """ Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term. """ return dict([(word_list.attrib['to'], dict([(word.text, word.attrib['count']) for word in word_list.findall('word')])) for word_list in self._content.findall('list')])
python
def get_words(self): """ Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term. """ return dict([(word_list.attrib['to'], dict([(word.text, word.attrib['count']) for word in word_list.findall('word')])) for word_list in self._content.findall('list')])
[ "def", "get_words", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "word_list", ".", "attrib", "[", "'to'", "]", ",", "dict", "(", "[", "(", "word", ".", "text", ",", "word", ".", "attrib", "[", "'count'", "]", ")", "for", "word", "in", ...
Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term.
[ "Get", "words", "matching", "the", "request", "search", "terms", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L324-L335
clusterpoint/python-client-api
pycps/response.py
AlternativesResponse.get_alternatives
def get_alternatives(self): """ Get the spelling alternatives for search terms. Returns: A dict in form: {<search term>: {'count': <number of times the searh term occurs in the Storage>, 'words': {<an alternative>: {'count': <number of times the alternative occurs in the Storage>, 'cr': <cr value of the alternative>, 'idif': <idif value of the alternative>, 'h': <h value of the alternative>} } // Repeated for every alternative. } } // Repeated for every search term """ return dict([(alternatives.find('to').text, {'count': int(alternatives.find('count').text), 'words': dict([(word.text, word.attrib) for word in alternatives.findall('word')])}) for alternatives in self._content.find('alternatives_list').findall('alternatives')])
python
def get_alternatives(self): """ Get the spelling alternatives for search terms. Returns: A dict in form: {<search term>: {'count': <number of times the searh term occurs in the Storage>, 'words': {<an alternative>: {'count': <number of times the alternative occurs in the Storage>, 'cr': <cr value of the alternative>, 'idif': <idif value of the alternative>, 'h': <h value of the alternative>} } // Repeated for every alternative. } } // Repeated for every search term """ return dict([(alternatives.find('to').text, {'count': int(alternatives.find('count').text), 'words': dict([(word.text, word.attrib) for word in alternatives.findall('word')])}) for alternatives in self._content.find('alternatives_list').findall('alternatives')])
[ "def", "get_alternatives", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "alternatives", ".", "find", "(", "'to'", ")", ".", "text", ",", "{", "'count'", ":", "int", "(", "alternatives", ".", "find", "(", "'count'", ")", ".", "text", ")", ...
Get the spelling alternatives for search terms. Returns: A dict in form: {<search term>: {'count': <number of times the searh term occurs in the Storage>, 'words': {<an alternative>: {'count': <number of times the alternative occurs in the Storage>, 'cr': <cr value of the alternative>, 'idif': <idif value of the alternative>, 'h': <h value of the alternative>} } // Repeated for every alternative. } } // Repeated for every search term
[ "Get", "the", "spelling", "alternatives", "for", "search", "terms", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L340-L359
clusterpoint/python-client-api
pycps/response.py
ListFacetsResponse.get_facets
def get_facets(self): """ Get facets from the response. Returns: A dict where requested facet paths are keys and a list of coresponding terms are values. """ return dict([(facet.attrib['path'], [term.text for term in facet.findall('term')]) for facet in self._content.findall('facet')])
python
def get_facets(self): """ Get facets from the response. Returns: A dict where requested facet paths are keys and a list of coresponding terms are values. """ return dict([(facet.attrib['path'], [term.text for term in facet.findall('term')]) for facet in self._content.findall('facet')])
[ "def", "get_facets", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "facet", ".", "attrib", "[", "'path'", "]", ",", "[", "term", ".", "text", "for", "term", "in", "facet", ".", "findall", "(", "'term'", ")", "]", ")", "for", "facet", "i...
Get facets from the response. Returns: A dict where requested facet paths are keys and a list of coresponding terms are values.
[ "Get", "facets", "from", "the", "response", "." ]
train
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L364-L372
mozilla/socorrolib
socorrolib/lib/httpclient.py
HttpClient._process_response
def _process_response(self): """Return a JSON result after an HTTP Request. Process the response of an HTTP Request and make it a JSON error if it failed. Otherwise return the response's content. """ response = self.conn.getresponse() if response.status == 200 or response.status == 201: data = response.read() else: data = { "error": { "code": response.status, "reason": response.reason, "data": response.read() } } return data
python
def _process_response(self): """Return a JSON result after an HTTP Request. Process the response of an HTTP Request and make it a JSON error if it failed. Otherwise return the response's content. """ response = self.conn.getresponse() if response.status == 200 or response.status == 201: data = response.read() else: data = { "error": { "code": response.status, "reason": response.reason, "data": response.read() } } return data
[ "def", "_process_response", "(", "self", ")", ":", "response", "=", "self", ".", "conn", ".", "getresponse", "(", ")", "if", "response", ".", "status", "==", "200", "or", "response", ".", "status", "==", "201", ":", "data", "=", "response", ".", "read"...
Return a JSON result after an HTTP Request. Process the response of an HTTP Request and make it a JSON error if it failed. Otherwise return the response's content.
[ "Return", "a", "JSON", "result", "after", "an", "HTTP", "Request", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/httpclient.py#L27-L46
mozilla/socorrolib
socorrolib/lib/httpclient.py
HttpClient.post
def post(self, url, data): """Send a HTTP POST request to a URL and return the result. """ headers = { "Content-type": "application/x-www-form-urlencoded", "Accept": "text/json" } self.conn.request("POST", url, data, headers) return self._process_response()
python
def post(self, url, data): """Send a HTTP POST request to a URL and return the result. """ headers = { "Content-type": "application/x-www-form-urlencoded", "Accept": "text/json" } self.conn.request("POST", url, data, headers) return self._process_response()
[ "def", "post", "(", "self", ",", "url", ",", "data", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept\"", ":", "\"text/json\"", "}", "self", ".", "conn", ".", "request", "(", "\"POST\"", ",", "ur...
Send a HTTP POST request to a URL and return the result.
[ "Send", "a", "HTTP", "POST", "request", "to", "a", "URL", "and", "return", "the", "result", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/httpclient.py#L54-L62
mozilla/socorrolib
socorrolib/lib/httpclient.py
HttpClient.put
def put(self, url, data=None): """Send a HTTP PUT request to a URL and return the result. """ self.conn.request("PUT", url, data) return self._process_response()
python
def put(self, url, data=None): """Send a HTTP PUT request to a URL and return the result. """ self.conn.request("PUT", url, data) return self._process_response()
[ "def", "put", "(", "self", ",", "url", ",", "data", "=", "None", ")", ":", "self", ".", "conn", ".", "request", "(", "\"PUT\"", ",", "url", ",", "data", ")", "return", "self", ".", "_process_response", "(", ")" ]
Send a HTTP PUT request to a URL and return the result.
[ "Send", "a", "HTTP", "PUT", "request", "to", "a", "URL", "and", "return", "the", "result", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/httpclient.py#L64-L68
scieloorg/processing
thrift/clients.py
PublicationStats.number_of_issues_by_year
def number_of_issues_by_year(self, issn, collection, years=0, type=None): """ type: ['regular', 'supplement', 'pressrelease', 'ahead', 'special'] """ body = { "query": { "bool": { "must": [ { "match": { "issn": issn } }, { "match": { "collection": collection } } ] } }, "aggs": { "issue": { "cardinality": { "field": "issue" } } } } if type: body['query']['bool']['must'].append({"match": {"issue_type": type}}) if years != 0: body['aggs'] = { "publication_year": { "terms": { "field": "publication_year", "size": years, "order": { "_term": 'desc' } }, "aggs": { "issue": { "cardinality": { "field": "issue" } } } } } query_parameters = [ ('size', '0') ] query_result = self.search( 'article', json.dumps(body), query_parameters ) return self._compute_number_of_issues_by_year( query_result, years=years)
python
def number_of_issues_by_year(self, issn, collection, years=0, type=None): """ type: ['regular', 'supplement', 'pressrelease', 'ahead', 'special'] """ body = { "query": { "bool": { "must": [ { "match": { "issn": issn } }, { "match": { "collection": collection } } ] } }, "aggs": { "issue": { "cardinality": { "field": "issue" } } } } if type: body['query']['bool']['must'].append({"match": {"issue_type": type}}) if years != 0: body['aggs'] = { "publication_year": { "terms": { "field": "publication_year", "size": years, "order": { "_term": 'desc' } }, "aggs": { "issue": { "cardinality": { "field": "issue" } } } } } query_parameters = [ ('size', '0') ] query_result = self.search( 'article', json.dumps(body), query_parameters ) return self._compute_number_of_issues_by_year( query_result, years=years)
[ "def", "number_of_issues_by_year", "(", "self", ",", "issn", ",", "collection", ",", "years", "=", "0", ",", "type", "=", "None", ")", ":", "body", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "[", "{", "\"match\"", ":", "{...
type: ['regular', 'supplement', 'pressrelease', 'ahead', 'special']
[ "type", ":", "[", "regular", "supplement", "pressrelease", "ahead", "special", "]" ]
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/thrift/clients.py#L511-L575
scieloorg/processing
thrift/clients.py
Citedby._must_not_custom_query
def _must_not_custom_query(issn): """ Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como restrição "must_not". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir. """ custom_queries = set([utils.cleanup_string(i) for i in journal_titles.load(issn).get('must_not', [])]) for item in custom_queries: query = { "match": { "reference_source_cleaned": item } } yield query
python
def _must_not_custom_query(issn): """ Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como restrição "must_not". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir. """ custom_queries = set([utils.cleanup_string(i) for i in journal_titles.load(issn).get('must_not', [])]) for item in custom_queries: query = { "match": { "reference_source_cleaned": item } } yield query
[ "def", "_must_not_custom_query", "(", "issn", ")", ":", "custom_queries", "=", "set", "(", "[", "utils", ".", "cleanup_string", "(", "i", ")", "for", "i", "in", "journal_titles", ".", "load", "(", "issn", ")", ".", "get", "(", "'must_not'", ",", "[", "...
Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como restrição "must_not". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir.
[ "Este", "metodo", "constroi", "a", "lista", "de", "filtros", "por", "título", "de", "periódico", "que", "será", "aplicada", "na", "pesquisa", "boleana", "como", "restrição", "must_not", ".", "A", "lista", "de", "filtros", "é", "coletada", "do", "template", "...
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/thrift/clients.py#L772-L790
scieloorg/processing
thrift/clients.py
Citedby._fuzzy_custom_query
def _fuzzy_custom_query(issn, titles): """ Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como match por similaridade "should". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir. """ custom_queries = journal_titles.load(issn).get('should', []) titles = [{'title': i} for i in titles if i not in [x['title'] for x in custom_queries]] titles.extend(custom_queries) for item in titles: if len(item['title'].strip()) == 0: continue query = { "fuzzy": { "reference_source_cleaned": { "value": utils.cleanup_string(item['title']), "fuzziness": item.get('fuzziness', 3), "max_expansions": 50 } } } yield query
python
def _fuzzy_custom_query(issn, titles): """ Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como match por similaridade "should". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir. """ custom_queries = journal_titles.load(issn).get('should', []) titles = [{'title': i} for i in titles if i not in [x['title'] for x in custom_queries]] titles.extend(custom_queries) for item in titles: if len(item['title'].strip()) == 0: continue query = { "fuzzy": { "reference_source_cleaned": { "value": utils.cleanup_string(item['title']), "fuzziness": item.get('fuzziness', 3), "max_expansions": 50 } } } yield query
[ "def", "_fuzzy_custom_query", "(", "issn", ",", "titles", ")", ":", "custom_queries", "=", "journal_titles", ".", "load", "(", "issn", ")", ".", "get", "(", "'should'", ",", "[", "]", ")", "titles", "=", "[", "{", "'title'", ":", "i", "}", "for", "i"...
Este metodo constroi a lista de filtros por título de periódico que será aplicada na pesquisa boleana como match por similaridade "should". A lista de filtros é coletada do template de pesquisa customizada do periódico, quanto este template existir.
[ "Este", "metodo", "constroi", "a", "lista", "de", "filtros", "por", "título", "de", "periódico", "que", "será", "aplicada", "na", "pesquisa", "boleana", "como", "match", "por", "similaridade", "should", ".", "A", "lista", "de", "filtros", "é", "coletada", "d...
train
https://github.com/scieloorg/processing/blob/629b50b45ba7a176651cd3bfcdb441dab6fddfcc/thrift/clients.py#L793-L819
zhexiao/ezhost
ezhost/ServerAbstract.py
ServerAbstract.nginx_web_ssl_config
def nginx_web_ssl_config(self): """ Nginx web ssl config """ dt = [self.nginx_web_dir, self.nginx_ssl_dir] return nginx_conf_string.simple_ssl_web_conf.format(dt=dt)
python
def nginx_web_ssl_config(self): """ Nginx web ssl config """ dt = [self.nginx_web_dir, self.nginx_ssl_dir] return nginx_conf_string.simple_ssl_web_conf.format(dt=dt)
[ "def", "nginx_web_ssl_config", "(", "self", ")", ":", "dt", "=", "[", "self", ".", "nginx_web_dir", ",", "self", ".", "nginx_ssl_dir", "]", "return", "nginx_conf_string", ".", "simple_ssl_web_conf", ".", "format", "(", "dt", "=", "dt", ")" ]
Nginx web ssl config
[ "Nginx", "web", "ssl", "config" ]
train
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerAbstract.py#L177-L183
i3visio/entify
entify/lib/patterns/dni.py
DNI.isValidExp
def isValidExp(self, exp): ''' Method to verify if a given expression is correct just in case the used regular expression needs additional processing to verify this fact.$ This method will be overwritten when necessary. :param exp: Expression to verify. :return: True | False ''' # order of the letters depending on which is the mod of the number # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 order = ['T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z', 'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E', 'T'] #print exp l = exp[len(exp)-1] try: # verifying if it is an 8-length number number = int(exp[0:7]) except: try: # verifying if it is a 7-length number number = int(exp[0:6]) except: # not a valid number pass if l == order[number%23]: return True else: return False
python
def isValidExp(self, exp): ''' Method to verify if a given expression is correct just in case the used regular expression needs additional processing to verify this fact.$ This method will be overwritten when necessary. :param exp: Expression to verify. :return: True | False ''' # order of the letters depending on which is the mod of the number # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 order = ['T', 'R', 'W', 'A', 'G', 'M', 'Y', 'F', 'P', 'D', 'X', 'B', 'N', 'J', 'Z', 'S', 'Q', 'V', 'H', 'L', 'C', 'K', 'E', 'T'] #print exp l = exp[len(exp)-1] try: # verifying if it is an 8-length number number = int(exp[0:7]) except: try: # verifying if it is a 7-length number number = int(exp[0:6]) except: # not a valid number pass if l == order[number%23]: return True else: return False
[ "def", "isValidExp", "(", "self", ",", "exp", ")", ":", "# order of the letters depending on which is the mod of the number", "# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23", "order", "=", "[", "'T'", ",", ...
Method to verify if a given expression is correct just in case the used regular expression needs additional processing to verify this fact.$ This method will be overwritten when necessary. :param exp: Expression to verify. :return: True | False
[ "Method", "to", "verify", "if", "a", "given", "expression", "is", "correct", "just", "in", "case", "the", "used", "regular", "expression", "needs", "additional", "processing", "to", "verify", "this", "fact", ".", "$", "This", "method", "will", "be", "overwri...
train
https://github.com/i3visio/entify/blob/51c5b89cebee3a39d44d0918e2798739361f337c/entify/lib/patterns/dni.py#L42-L71
henzk/ape
ape/container_mode/validators/product_spec_validator.py
ProductSpecValidator.is_valid
def is_valid(self): """ Checks the feature list product spec against. Checks if all mandartory features are contained; Checks that all "never" features are not contained :return: boolean """ for spec in self.product_specs: for feature in spec.get('mandatory', []): if feature.replace('__', '.') not in self.feature_list: self.errors_mandatory.append(feature) for feature in spec.get('never', []): if feature.replace('__', '.') in self.feature_list: self.errors_never.append(feature) return not self.has_errors()
python
def is_valid(self): """ Checks the feature list product spec against. Checks if all mandartory features are contained; Checks that all "never" features are not contained :return: boolean """ for spec in self.product_specs: for feature in spec.get('mandatory', []): if feature.replace('__', '.') not in self.feature_list: self.errors_mandatory.append(feature) for feature in spec.get('never', []): if feature.replace('__', '.') in self.feature_list: self.errors_never.append(feature) return not self.has_errors()
[ "def", "is_valid", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "product_specs", ":", "for", "feature", "in", "spec", ".", "get", "(", "'mandatory'", ",", "[", "]", ")", ":", "if", "feature", ".", "replace", "(", "'__'", ",", "'.'", ")"...
Checks the feature list product spec against. Checks if all mandartory features are contained; Checks that all "never" features are not contained :return: boolean
[ "Checks", "the", "feature", "list", "product", "spec", "against", ".", "Checks", "if", "all", "mandartory", "features", "are", "contained", ";", "Checks", "that", "all", "never", "features", "are", "not", "contained", ":", "return", ":", "boolean" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/product_spec_validator.py#L21-L38
henzk/ape
ape/container_mode/validators/product_spec_validator.py
ProductSpecValidator._read
def _read(self, spec_path, product_name): """ Reads the spec files and extracts the concrete product spec. :param spec_path: :param product_name: :return: """ matches = [] with codecs.open(spec_path, 'r') as f: for entry in json.loads(f.read()): if product_name in entry.get('products'): matches.append(entry) return matches
python
def _read(self, spec_path, product_name): """ Reads the spec files and extracts the concrete product spec. :param spec_path: :param product_name: :return: """ matches = [] with codecs.open(spec_path, 'r') as f: for entry in json.loads(f.read()): if product_name in entry.get('products'): matches.append(entry) return matches
[ "def", "_read", "(", "self", ",", "spec_path", ",", "product_name", ")", ":", "matches", "=", "[", "]", "with", "codecs", ".", "open", "(", "spec_path", ",", "'r'", ")", "as", "f", ":", "for", "entry", "in", "json", ".", "loads", "(", "f", ".", "...
Reads the spec files and extracts the concrete product spec. :param spec_path: :param product_name: :return:
[ "Reads", "the", "spec", "files", "and", "extracts", "the", "concrete", "product", "spec", ".", ":", "param", "spec_path", ":", ":", "param", "product_name", ":", ":", "return", ":" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/container_mode/validators/product_spec_validator.py#L61-L73
azraq27/neural
neural/connectivity.py
connectivity_map
def connectivity_map(dset,prefix,x,y,z,radius=2): '''Will perform connectivity analysis on ``dset`` using seed point ``(x,y,z)`` (in RAI order) with a sphere of radius ``radius``. Does not perform any preprocessing of ``dset``. This should be already motion corrected, noise-regressed, residualized, etc.''' seed_series = nl.sphere_average(dset,x,y,z,radius) with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write('\n'.join([str(x) for x in seed_series])) decon = nl.Decon() decon.input_dsets = dset decon.stim_files = {'seed':temp.name} decon.prefix = prefix decon.run() try: os.remove(temp.name) except: pass
python
def connectivity_map(dset,prefix,x,y,z,radius=2): '''Will perform connectivity analysis on ``dset`` using seed point ``(x,y,z)`` (in RAI order) with a sphere of radius ``radius``. Does not perform any preprocessing of ``dset``. This should be already motion corrected, noise-regressed, residualized, etc.''' seed_series = nl.sphere_average(dset,x,y,z,radius) with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write('\n'.join([str(x) for x in seed_series])) decon = nl.Decon() decon.input_dsets = dset decon.stim_files = {'seed':temp.name} decon.prefix = prefix decon.run() try: os.remove(temp.name) except: pass
[ "def", "connectivity_map", "(", "dset", ",", "prefix", ",", "x", ",", "y", ",", "z", ",", "radius", "=", "2", ")", ":", "seed_series", "=", "nl", ".", "sphere_average", "(", "dset", ",", "x", ",", "y", ",", "z", ",", "radius", ")", "with", "tempf...
Will perform connectivity analysis on ``dset`` using seed point ``(x,y,z)`` (in RAI order) with a sphere of radius ``radius``. Does not perform any preprocessing of ``dset``. This should be already motion corrected, noise-regressed, residualized, etc.
[ "Will", "perform", "connectivity", "analysis", "on", "dset", "using", "seed", "point", "(", "x", "y", "z", ")", "(", "in", "RAI", "order", ")", "with", "a", "sphere", "of", "radius", "radius", ".", "Does", "not", "perform", "any", "preprocessing", "of", ...
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/connectivity.py#L5-L19
Kunstmord/datalib
src/dataset.py
extract_feature_base
def extract_feature_base(dbpath, folder_path, set_object, extractor, force_extraction=False, verbose=0, add_args=None, custom_name=None): """ Generic function which extracts a feature and stores it in the database Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database extractor : function, which takes the path of a data point and *args as parameters and returns a feature force_extraction : boolean, if True - will re-extract feature even if a feature with this name already exists in the database, otherwise, will only extract if the feature doesn't exist in the database. default value: False verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted ever verbose steps (for example, verbose=500 will print 0, 500, 1000 etc.). default value: 0 add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the extractor should take only one input argument - the file path. default value: None custom_name : string, optional name for the feature (it will be stored in the database with the custom_name instead of extractor function name). if None, the extractor function name will be used. 
default value: None Returns ------- None """ if custom_name is None: extractor_name = extractor.__name__ else: extractor_name = custom_name engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() a = 0 tmp_object = session.query(set_object).get(1) if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): if add_args is None: i.features = {extractor_name: extractor(join(folder_path, i.path))} else: i.features = {extractor_name: extractor(join(folder_path, i.path), add_args)} if verbose > 0: if a % verbose == 0: print a a += 1 elif (extractor_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): if add_args is None: i.features[extractor_name] = extractor(join(folder_path, i.path)) else: i.features[extractor_name] = extractor(join(folder_path, i.path), add_args) if verbose > 0: if a % verbose == 0: print a a += 1 session.commit() session.close() return None
python
def extract_feature_base(dbpath, folder_path, set_object, extractor, force_extraction=False, verbose=0, add_args=None, custom_name=None): """ Generic function which extracts a feature and stores it in the database Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database extractor : function, which takes the path of a data point and *args as parameters and returns a feature force_extraction : boolean, if True - will re-extract feature even if a feature with this name already exists in the database, otherwise, will only extract if the feature doesn't exist in the database. default value: False verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted ever verbose steps (for example, verbose=500 will print 0, 500, 1000 etc.). default value: 0 add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the extractor should take only one input argument - the file path. default value: None custom_name : string, optional name for the feature (it will be stored in the database with the custom_name instead of extractor function name). if None, the extractor function name will be used. 
default value: None Returns ------- None """ if custom_name is None: extractor_name = extractor.__name__ else: extractor_name = custom_name engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() a = 0 tmp_object = session.query(set_object).get(1) if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): if add_args is None: i.features = {extractor_name: extractor(join(folder_path, i.path))} else: i.features = {extractor_name: extractor(join(folder_path, i.path), add_args)} if verbose > 0: if a % verbose == 0: print a a += 1 elif (extractor_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): if add_args is None: i.features[extractor_name] = extractor(join(folder_path, i.path)) else: i.features[extractor_name] = extractor(join(folder_path, i.path), add_args) if verbose > 0: if a % verbose == 0: print a a += 1 session.commit() session.close() return None
[ "def", "extract_feature_base", "(", "dbpath", ",", "folder_path", ",", "set_object", ",", "extractor", ",", "force_extraction", "=", "False", ",", "verbose", "=", "0", ",", "add_args", "=", "None", ",", "custom_name", "=", "None", ")", ":", "if", "custom_nam...
Generic function which extracts a feature and stores it in the database Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database extractor : function, which takes the path of a data point and *args as parameters and returns a feature force_extraction : boolean, if True - will re-extract feature even if a feature with this name already exists in the database, otherwise, will only extract if the feature doesn't exist in the database. default value: False verbose : int, if bigger than 0, will print the current number of the file for which data is being extracted ever verbose steps (for example, verbose=500 will print 0, 500, 1000 etc.). default value: 0 add_args : optional arguments for the extractor (list/dictionary/tuple/whatever). if None, the extractor should take only one input argument - the file path. default value: None custom_name : string, optional name for the feature (it will be stored in the database with the custom_name instead of extractor function name). if None, the extractor function name will be used. default value: None Returns ------- None
[ "Generic", "function", "which", "extracts", "a", "feature", "and", "stores", "it", "in", "the", "database" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L17-L74
Kunstmord/datalib
src/dataset.py
return_features_base
def return_features_base(dbpath, set_object, names): """ Generic function which returns a list of extracted features from the database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_list : list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside list' is a feature (can be of any type) """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] if names == 'all': for i in session.query(set_object).order_by(set_object.id): row_list = [] for feature in i.features: row_list.append(i.features[feature]) return_list.append(row_list[:]) else: for i in session.query(set_object).order_by(set_object.id): row_list = [] for feature in i.features: if feature in names: row_list.append(i.features[feature]) return_list.append(row_list[:]) return return_list
python
def return_features_base(dbpath, set_object, names): """ Generic function which returns a list of extracted features from the database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_list : list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside list' is a feature (can be of any type) """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] if names == 'all': for i in session.query(set_object).order_by(set_object.id): row_list = [] for feature in i.features: row_list.append(i.features[feature]) return_list.append(row_list[:]) else: for i in session.query(set_object).order_by(set_object.id): row_list = [] for feature in i.features: if feature in names: row_list.append(i.features[feature]) return_list.append(row_list[:]) return return_list
[ "def", "return_features_base", "(", "dbpath", ",", "set_object", ",", "names", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl",...
Generic function which returns a list of extracted features from the database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_list : list of lists, each 'inside list' corresponds to a single data point, each element of the 'inside list' is a feature (can be of any type)
[ "Generic", "function", "which", "returns", "a", "list", "of", "extracted", "features", "from", "the", "database" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L138-L171
Kunstmord/datalib
src/dataset.py
return_features_numpy_base
def return_features_numpy_base(dbpath, set_object, points_amt, names): """ Generic function which returns a 2d numpy array of extracted features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported. """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(1) if names == 'all': columns_amt = 0 for feature in tmp_object.features: if type(tmp_object.features[feature]) is np.ndarray: columns_amt += tmp_object.features[feature].shape[0] else: columns_amt += 1 return_array = np.zeros([points_amt, columns_amt]) for i in enumerate(session.query(set_object).order_by(set_object.id)): counter = 0 for feature in i[1].features: feature_val = i[1].features[feature] if type(feature_val) is np.ndarray: columns_amt = feature_val.shape[0] return_array[i[0], counter:counter + columns_amt] = feature_val[:] counter += feature_val.shape[0] else: return_array[i[0], counter] = feature_val counter += 1 else: columns_amt = 0 for feature in tmp_object.features: if feature in names: if type(tmp_object.features[feature]) is np.ndarray: columns_amt += tmp_object.features[feature].shape[0] else: columns_amt += 1 return_array = np.zeros([points_amt, columns_amt]) for i in enumerate(session.query(set_object).order_by(set_object.id)): counter = 0 for feature in i[1].features: if feature in names: feature_val = i[1].features[feature] if type(feature_val) is np.ndarray: 
columns_amt = feature_val.shape[0] return_array[i[0], counter:counter + columns_amt] = feature_val[:] counter += feature_val.shape[0] else: return_array[i[0], counter] = feature_val counter += 1 session.close() return return_array
python
def return_features_numpy_base(dbpath, set_object, points_amt, names): """ Generic function which returns a 2d numpy array of extracted features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported. """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(1) if names == 'all': columns_amt = 0 for feature in tmp_object.features: if type(tmp_object.features[feature]) is np.ndarray: columns_amt += tmp_object.features[feature].shape[0] else: columns_amt += 1 return_array = np.zeros([points_amt, columns_amt]) for i in enumerate(session.query(set_object).order_by(set_object.id)): counter = 0 for feature in i[1].features: feature_val = i[1].features[feature] if type(feature_val) is np.ndarray: columns_amt = feature_val.shape[0] return_array[i[0], counter:counter + columns_amt] = feature_val[:] counter += feature_val.shape[0] else: return_array[i[0], counter] = feature_val counter += 1 else: columns_amt = 0 for feature in tmp_object.features: if feature in names: if type(tmp_object.features[feature]) is np.ndarray: columns_amt += tmp_object.features[feature].shape[0] else: columns_amt += 1 return_array = np.zeros([points_amt, columns_amt]) for i in enumerate(session.query(set_object).order_by(set_object.id)): counter = 0 for feature in i[1].features: if feature in names: feature_val = i[1].features[feature] if type(feature_val) is np.ndarray: 
columns_amt = feature_val.shape[0] return_array[i[0], counter:counter + columns_amt] = feature_val[:] counter += feature_val.shape[0] else: return_array[i[0], counter] = feature_val counter += 1 session.close() return return_array
[ "def", "return_features_numpy_base", "(", "dbpath", ",", "set_object", ",", "points_amt", ",", "names", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "ses...
Generic function which returns a 2d numpy array of extracted features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database names : list of strings, a list of feature names which are to be retrieved from the database, if equal to 'all', all features will be returned Returns ------- return_array : ndarray of features, each row corresponds to a single datapoint. If a single feature is a 1d numpy array, then it will be unrolled into the resulting array. Higher-dimensional numpy arrays are not supported.
[ "Generic", "function", "which", "returns", "a", "2d", "numpy", "array", "of", "extracted", "features" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L174-L238
Kunstmord/datalib
src/dataset.py
return_real_id_base
def return_real_id_base(dbpath, set_object): """ Generic function which returns a list of real_id's Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix) """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] for i in session.query(set_object).order_by(set_object.id): return_list.append(i.real_id) session.close() return return_list
python
def return_real_id_base(dbpath, set_object): """ Generic function which returns a list of real_id's Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix) """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] for i in session.query(set_object).order_by(set_object.id): return_list.append(i.real_id) session.close() return return_list
[ "def", "return_real_id_base", "(", "dbpath", ",", "set_object", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl", "(", ")", "r...
Generic function which returns a list of real_id's Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
[ "Generic", "function", "which", "returns", "a", "list", "of", "real_id", "s" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L241-L261
Kunstmord/datalib
src/dataset.py
return_feature_list_base
def return_feature_list_base(dbpath, set_object): """ Generic function which returns a list of the names of all available features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of strings corresponding to all available features """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] tmp_object = session.query(set_object).get(1) for feature in tmp_object.features: return_list.append(feature) session.close() return return_list
python
def return_feature_list_base(dbpath, set_object): """ Generic function which returns a list of the names of all available features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of strings corresponding to all available features """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] tmp_object = session.query(set_object).get(1) for feature in tmp_object.features: return_list.append(feature) session.close() return return_list
[ "def", "return_feature_list_base", "(", "dbpath", ",", "set_object", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl", "(", ")",...
Generic function which returns a list of the names of all available features Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of strings corresponding to all available features
[ "Generic", "function", "which", "returns", "a", "list", "of", "the", "names", "of", "all", "available", "features" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L264-L285
Kunstmord/datalib
src/dataset.py
return_feature_list_numpy_base
def return_feature_list_numpy_base(dbpath, set_object): """ Generic function which returns a list of tuples containing, each containing the name of the feature and the length of the corresponding 1d numpy array of the feature (or length of the list) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of tuples containing the name of the feature and the length of the corresponding list or 1d numpy array """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] tmp_object = session.query(set_object).get(1) for feature in tmp_object.features: if type(tmp_object.features[feature]) is np.ndarray: flength = tmp_object.features[feature].shape[0] else: flength = 1 return_list.append((feature, flength)) session.close() return return_list
python
def return_feature_list_numpy_base(dbpath, set_object): """ Generic function which returns a list of tuples containing, each containing the name of the feature and the length of the corresponding 1d numpy array of the feature (or length of the list) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of tuples containing the name of the feature and the length of the corresponding list or 1d numpy array """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() return_list = [] tmp_object = session.query(set_object).get(1) for feature in tmp_object.features: if type(tmp_object.features[feature]) is np.ndarray: flength = tmp_object.features[feature].shape[0] else: flength = 1 return_list.append((feature, flength)) session.close() return return_list
[ "def", "return_feature_list_numpy_base", "(", "dbpath", ",", "set_object", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl", "(", ...
Generic function which returns a list of tuples containing, each containing the name of the feature and the length of the corresponding 1d numpy array of the feature (or length of the list) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of tuples containing the name of the feature and the length of the corresponding list or 1d numpy array
[ "Generic", "function", "which", "returns", "a", "list", "of", "tuples", "containing", "each", "containing", "the", "name", "of", "the", "feature", "and", "the", "length", "of", "the", "corresponding", "1d", "numpy", "array", "of", "the", "feature", "(", "or"...
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L288-L315
Kunstmord/datalib
src/dataset.py
copy_features_base
def copy_features_base(dbpath_origin, dbpath_destination, set_object, force_copy=False): """ Generic function which copies features from one database to another (base object should be of the same type) Parameters ---------- dbpath_origin : string, path to SQLite database file from which the features will be copied dbpath_destination : string, path to SQLite database file to which the features will be copied set_object : object (either TestSet or TrainSet) which is stored in the database force_copy : boolean, if True - will overwrite features with same name when copying, if False, won't; default value: False Returns ------- None """ engine_origin = create_engine('sqlite:////' + dbpath_origin) engine_destination = create_engine('sqlite:////' + dbpath_destination) session_cl_origin = sessionmaker(bind=engine_origin) session_cl_destination = sessionmaker(bind=engine_destination) session_origin = session_cl_origin() session_destination = session_cl_destination() if force_copy is True: for i in session_origin.query(set_object).order_by(set_object.id): dest_obj = session_destination.query(set_object).get(i.id) for feature in i.features: if dest_obj.features is not None: dest_obj.features[feature] = i.features[feature] else: dest_obj.features = {feature: i.features[feature]} else: for i in session_origin.query(set_object).order_by(set_object.id): dest_obj = session_destination.query(set_object).get(i.id) for feature in i.features: if dest_obj.features is not None: if (feature not in dest_obj.features) or force_copy is True: dest_obj.features[feature] = i.features[feature] else: dest_obj.features = {feature: i.features[feature]} session_origin.close() session_destination.commit() session_destination.close() return None
python
def copy_features_base(dbpath_origin, dbpath_destination, set_object, force_copy=False): """ Generic function which copies features from one database to another (base object should be of the same type) Parameters ---------- dbpath_origin : string, path to SQLite database file from which the features will be copied dbpath_destination : string, path to SQLite database file to which the features will be copied set_object : object (either TestSet or TrainSet) which is stored in the database force_copy : boolean, if True - will overwrite features with same name when copying, if False, won't; default value: False Returns ------- None """ engine_origin = create_engine('sqlite:////' + dbpath_origin) engine_destination = create_engine('sqlite:////' + dbpath_destination) session_cl_origin = sessionmaker(bind=engine_origin) session_cl_destination = sessionmaker(bind=engine_destination) session_origin = session_cl_origin() session_destination = session_cl_destination() if force_copy is True: for i in session_origin.query(set_object).order_by(set_object.id): dest_obj = session_destination.query(set_object).get(i.id) for feature in i.features: if dest_obj.features is not None: dest_obj.features[feature] = i.features[feature] else: dest_obj.features = {feature: i.features[feature]} else: for i in session_origin.query(set_object).order_by(set_object.id): dest_obj = session_destination.query(set_object).get(i.id) for feature in i.features: if dest_obj.features is not None: if (feature not in dest_obj.features) or force_copy is True: dest_obj.features[feature] = i.features[feature] else: dest_obj.features = {feature: i.features[feature]} session_origin.close() session_destination.commit() session_destination.close() return None
[ "def", "copy_features_base", "(", "dbpath_origin", ",", "dbpath_destination", ",", "set_object", ",", "force_copy", "=", "False", ")", ":", "engine_origin", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath_origin", ")", "engine_destination", "=", "create_engin...
Generic function which copies features from one database to another (base object should be of the same type) Parameters ---------- dbpath_origin : string, path to SQLite database file from which the features will be copied dbpath_destination : string, path to SQLite database file to which the features will be copied set_object : object (either TestSet or TrainSet) which is stored in the database force_copy : boolean, if True - will overwrite features with same name when copying, if False, won't; default value: False Returns ------- None
[ "Generic", "function", "which", "copies", "features", "from", "one", "database", "to", "another", "(", "base", "object", "should", "be", "of", "the", "same", "type", ")" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L318-L360
Kunstmord/datalib
src/dataset.py
return_single_real_id_base
def return_single_real_id_base(dbpath, set_object, object_id): """ Generic function which returns a real_id string of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- real_id : string """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.real_id
python
def return_single_real_id_base(dbpath, set_object, object_id): """ Generic function which returns a real_id string of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- real_id : string """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.real_id
[ "def", "return_single_real_id_base", "(", "dbpath", ",", "set_object", ",", "object_id", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "se...
Generic function which returns a real_id string of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- real_id : string
[ "Generic", "function", "which", "returns", "a", "real_id", "string", "of", "an", "object", "specified", "by", "the", "object_id" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L363-L382
Kunstmord/datalib
src/dataset.py
return_single_path_base
def return_single_path_base(dbpath, set_object, object_id): """ Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- path : string """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.path
python
def return_single_path_base(dbpath, set_object, object_id): """ Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- path : string """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.path
[ "def", "return_single_path_base", "(", "dbpath", ",", "set_object", ",", "object_id", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "sessi...
Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- path : string
[ "Generic", "function", "which", "returns", "a", "path", "(", "path", "is", "relative", "to", "the", "path_to_set", "stored", "in", "the", "database", ")", "of", "an", "object", "specified", "by", "the", "object_id" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L385-L405
Kunstmord/datalib
src/dataset.py
return_single_features_base
def return_single_features_base(dbpath, set_object, object_id): """ Generic function which returns the features of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- features : dict containing the features """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.features
python
def return_single_features_base(dbpath, set_object, object_id): """ Generic function which returns the features of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- features : dict containing the features """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() return tmp_object.features
[ "def", "return_single_features_base", "(", "dbpath", ",", "set_object", ",", "object_id", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "s...
Generic function which returns the features of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- features : dict containing the features
[ "Generic", "function", "which", "returns", "the", "features", "of", "an", "object", "specified", "by", "the", "object_id" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L408-L427
Kunstmord/datalib
src/dataset.py
return_single_convert_numpy_base
def return_single_convert_numpy_base(dbpath, folder_path, set_object, object_id, converter, add_args=None): """ Generic function which converts an object specified by the object_id into a numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : ndarray """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() if add_args is None: return converter(join(folder_path, tmp_object.path)) else: return converter(join(folder_path, tmp_object.path), add_args)
python
def return_single_convert_numpy_base(dbpath, folder_path, set_object, object_id, converter, add_args=None): """ Generic function which converts an object specified by the object_id into a numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : ndarray """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(object_id) session.close() if add_args is None: return converter(join(folder_path, tmp_object.path)) else: return converter(join(folder_path, tmp_object.path), add_args)
[ "def", "return_single_convert_numpy_base", "(", "dbpath", ",", "folder_path", ",", "set_object", ",", "object_id", ",", "converter", ",", "add_args", "=", "None", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "...
Generic function which converts an object specified by the object_id into a numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : ndarray
[ "Generic", "function", "which", "converts", "an", "object", "specified", "by", "the", "object_id", "into", "a", "numpy", "array", "and", "returns", "the", "array", "the", "conversion", "is", "done", "by", "the", "converter", "function" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L430-L457
Kunstmord/datalib
src/dataset.py
return_multiple_convert_numpy_base
def return_multiple_convert_numpy_base(dbpath, folder_path, set_object, start_id, end_id, converter, add_args=None): """ Generic function which converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database start_id : the id of the first object to be converted end_id : the id of the last object to be converted converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(start_id) if add_args is None: converted = converter(join(folder_path, tmp_object.path)) else: converted = converter(join(folder_path, tmp_object.path), add_args) if len(converted.shape) == 0: columns_amt = 1 else: columns_amt = converted.shape[0] return_array = np.zeros([end_id - start_id + 1, columns_amt]) for i in xrange(end_id - start_id + 1): tmp_object = session.query(set_object).get(start_id + i) if add_args is None: return_array[i, :] = converter(join(folder_path, tmp_object.path)) else: return_array[i, :] = converter(join(folder_path, tmp_object.path), add_args) session.close() return return_array
python
def return_multiple_convert_numpy_base(dbpath, folder_path, set_object, start_id, end_id, converter, add_args=None): """ Generic function which converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database start_id : the id of the first object to be converted end_id : the id of the last object to be converted converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(start_id) if add_args is None: converted = converter(join(folder_path, tmp_object.path)) else: converted = converter(join(folder_path, tmp_object.path), add_args) if len(converted.shape) == 0: columns_amt = 1 else: columns_amt = converted.shape[0] return_array = np.zeros([end_id - start_id + 1, columns_amt]) for i in xrange(end_id - start_id + 1): tmp_object = session.query(set_object).get(start_id + i) if add_args is None: return_array[i, :] = converter(join(folder_path, tmp_object.path)) else: return_array[i, :] = converter(join(folder_path, tmp_object.path), add_args) session.close() return return_array
[ "def", "return_multiple_convert_numpy_base", "(", "dbpath", ",", "folder_path", ",", "set_object", ",", "start_id", ",", "end_id", ",", "converter", ",", "add_args", "=", "None", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")",...
Generic function which converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- dbpath : string, path to SQLite database file folder_path : string, path to folder where the files are stored set_object : object (either TestSet or TrainSet) which is stored in the database start_id : the id of the first object to be converted end_id : the id of the last object to be converted converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray
[ "Generic", "function", "which", "converts", "several", "objects", "with", "ids", "in", "the", "range", "(", "start_id", "end_id", ")", "into", "a", "2d", "numpy", "array", "and", "returns", "the", "array", "the", "conversion", "is", "done", "by", "the", "c...
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L460-L500
Kunstmord/datalib
src/dataset.py
dump_feature_base
def dump_feature_base(dbpath, set_object, points_amt, feature_name, feature, force_extraction=True): """ Generic function which dumps a list of lists or ndarray of features into database (allows to copy features from a pre-existing .txt/.csv/.whatever file, for example) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database feature : list of lists or ndarray, contains the data to be written to the database force_extraction : boolean, if True - will overwrite any existing feature with this name default value: False Returns ------- None """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() a = 0 tmp_object = session.query(set_object).get(1) if type(feature) is np.ndarray: if feature.shape[0] != points_amt: raise errors.WrongSize(feature_name) else: if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): i.features = {feature_name: feature_name[a, :]} a += 1 elif (feature_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): i.features[feature_name] = feature_name[a, :] a += 1 else: if len(feature) != points_amt: raise errors.WrongSize(feature_name) else: if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): i.features = {feature_name: feature_name[a]} a += 1 elif (feature_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): i.features[feature_name] = feature_name[a] a += 1 session.commit() session.close() return None
python
def dump_feature_base(dbpath, set_object, points_amt, feature_name, feature, force_extraction=True): """ Generic function which dumps a list of lists or ndarray of features into database (allows to copy features from a pre-existing .txt/.csv/.whatever file, for example) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database feature : list of lists or ndarray, contains the data to be written to the database force_extraction : boolean, if True - will overwrite any existing feature with this name default value: False Returns ------- None """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() a = 0 tmp_object = session.query(set_object).get(1) if type(feature) is np.ndarray: if feature.shape[0] != points_amt: raise errors.WrongSize(feature_name) else: if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): i.features = {feature_name: feature_name[a, :]} a += 1 elif (feature_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): i.features[feature_name] = feature_name[a, :] a += 1 else: if len(feature) != points_amt: raise errors.WrongSize(feature_name) else: if tmp_object.features is None: for i in session.query(set_object).order_by(set_object.id): i.features = {feature_name: feature_name[a]} a += 1 elif (feature_name not in tmp_object.features) or force_extraction is True: for i in session.query(set_object).order_by(set_object.id): i.features[feature_name] = feature_name[a] a += 1 session.commit() session.close() return None
[ "def", "dump_feature_base", "(", "dbpath", ",", "set_object", ",", "points_amt", ",", "feature_name", ",", "feature", ",", "force_extraction", "=", "True", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "...
Generic function which dumps a list of lists or ndarray of features into database (allows to copy features from a pre-existing .txt/.csv/.whatever file, for example) Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database points_amt : int, number of data points in the database feature : list of lists or ndarray, contains the data to be written to the database force_extraction : boolean, if True - will overwrite any existing feature with this name default value: False Returns ------- None
[ "Generic", "function", "which", "dumps", "a", "list", "of", "lists", "or", "ndarray", "of", "features", "into", "database", "(", "allows", "to", "copy", "features", "from", "a", "pre", "-", "existing", ".", "txt", "/", ".", "csv", "/", ".", "whatever", ...
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L503-L553
Kunstmord/datalib
src/dataset.py
delete_feature_base
def delete_feature_base(dbpath, set_object, name): """ Generic function which deletes a feature from a database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database name : string, name of the feature to be deleted Returns ------- None """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(1) if tmp_object.features is not None and name in tmp_object.features: for i in session.query(set_object).order_by(set_object.id): del i.features[name] session.commit() session.close() return None
python
def delete_feature_base(dbpath, set_object, name): """ Generic function which deletes a feature from a database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database name : string, name of the feature to be deleted Returns ------- None """ engine = create_engine('sqlite:////' + dbpath) session_cl = sessionmaker(bind=engine) session = session_cl() tmp_object = session.query(set_object).get(1) if tmp_object.features is not None and name in tmp_object.features: for i in session.query(set_object).order_by(set_object.id): del i.features[name] session.commit() session.close() return None
[ "def", "delete_feature_base", "(", "dbpath", ",", "set_object", ",", "name", ")", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "dbpath", ")", "session_cl", "=", "sessionmaker", "(", "bind", "=", "engine", ")", "session", "=", "session_cl", ...
Generic function which deletes a feature from a database Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database name : string, name of the feature to be deleted Returns ------- None
[ "Generic", "function", "which", "deletes", "a", "feature", "from", "a", "database" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L556-L579
Kunstmord/datalib
src/dataset.py
DataSetBase.prepopulate
def prepopulate(self): """ Creates a database file (if it doesn't exist, writes each data point's path, real_id into it) Parameters ---------- self Returns ------- None """ if self._prepopulated is False: engine = create_engine('sqlite:////' + self.dbpath) self._db_base.metadata.create_all(engine) self._prepopulated = True session_cl = sessionmaker(bind=engine) session = session_cl() for (dirpath, dirnames, filenames) in walk(self.path_to_set): for f_name in filenames: datapoint = self._set_object(real_id=cutoff_filename(self.file_prefix, self.file_suffix, f_name), path=f_name, features=None) session.add(datapoint) self.points_amt += 1 session.commit() session.close() return None
python
def prepopulate(self): """ Creates a database file (if it doesn't exist, writes each data point's path, real_id into it) Parameters ---------- self Returns ------- None """ if self._prepopulated is False: engine = create_engine('sqlite:////' + self.dbpath) self._db_base.metadata.create_all(engine) self._prepopulated = True session_cl = sessionmaker(bind=engine) session = session_cl() for (dirpath, dirnames, filenames) in walk(self.path_to_set): for f_name in filenames: datapoint = self._set_object(real_id=cutoff_filename(self.file_prefix, self.file_suffix, f_name), path=f_name, features=None) session.add(datapoint) self.points_amt += 1 session.commit() session.close() return None
[ "def", "prepopulate", "(", "self", ")", ":", "if", "self", ".", "_prepopulated", "is", "False", ":", "engine", "=", "create_engine", "(", "'sqlite:////'", "+", "self", ".", "dbpath", ")", "self", ".", "_db_base", ".", "metadata", ".", "create_all", "(", ...
Creates a database file (if it doesn't exist, writes each data point's path, real_id into it) Parameters ---------- self Returns ------- None
[ "Creates", "a", "database", "file", "(", "if", "it", "doesn", "t", "exist", "writes", "each", "data", "point", "s", "path", "real_id", "into", "it", ")" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L621-L648