repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
LIVVkit/LIVVkit
livvkit/util/elements.py
section
def section(title, element_list):
    """
    Build a dictionary representing a new section.

    Sections contain a list of elements that are displayed separately from
    the global elements on the page.

    Args:
        title: The title of the section to be displayed
        element_list: The list of elements to display within the section
            (a single non-list element is wrapped in a list)

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as a section containing multiple elements
    """
    elements = element_list if isinstance(element_list, list) else [element_list]
    return {
        'Type': 'Section',
        'Title': title,
        'Elements': elements,
    }
python
def section(title, element_list): """ Returns a dictionary representing a new section. Sections contain a list of elements that are displayed separately from the global elements on the page. Args: title: The title of the section to be displayed element_list: The list of elements to display within the section Returns: A dictionary with metadata specifying that it is to be rendered as a section containing multiple elements """ sect = { 'Type': 'Section', 'Title': title, } if isinstance(element_list, list): sect['Elements'] = element_list else: sect['Elements'] = [element_list] return sect
[ "def", "section", "(", "title", ",", "element_list", ")", ":", "sect", "=", "{", "'Type'", ":", "'Section'", ",", "'Title'", ":", "title", ",", "}", "if", "isinstance", "(", "element_list", ",", "list", ")", ":", "sect", "[", "'Elements'", "]", "=", ...
Returns a dictionary representing a new section. Sections contain a list of elements that are displayed separately from the global elements on the page. Args: title: The title of the section to be displayed element_list: The list of elements to display within the section Returns: A dictionary with metadata specifying that it is to be rendered as a section containing multiple elements
[ "Returns", "a", "dictionary", "representing", "a", "new", "section", ".", "Sections", "contain", "a", "list", "of", "elements", "that", "are", "displayed", "separately", "from", "the", "global", "elements", "on", "the", "page", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/elements.py#L129-L152
LIVVkit/LIVVkit
livvkit/util/elements.py
image
def image(title, desc, image_name, group=None, height=None):
    """
    Build an image element.

    Image elements are primarily created and then wrapped into an image
    gallery element. This is not required behavior, however, and independent
    usage is allowed depending on the behavior required. The Javascript will
    search for the `image_name` in the component's `imgs` directory when
    rendering. For example, all verification images are output to
    `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output
    page will search for `image_name` within that directory.

    Args:
        title: The title to display
        desc: A description of the image or plot
        image_name: The filename of the image
        group: (optional) Title of lightbox group to join
        height: (optional) Height of image thumbnail to draw

    Returns:
        A dictionary with the metadata specifying that it is to be
        rendered as an image element
    """
    element = {
        'Type': 'Image',
        'Title': title,
        'Description': desc,
        'Plot File': image_name,
    }
    # Optional keys are only present when a truthy value was supplied.
    for key, value in (('Group', group), ('Height', height)):
        if value:
            element[key] = value
    return element
python
def image(title, desc, image_name, group=None, height=None): """ Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however and it's independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element """ ie = { 'Type': 'Image', 'Title': title, 'Description': desc, 'Plot File': image_name, } if group: ie['Group'] = group if height: ie['Height'] = height return ie
[ "def", "image", "(", "title", ",", "desc", ",", "image_name", ",", "group", "=", "None", ",", "height", "=", "None", ")", ":", "ie", "=", "{", "'Type'", ":", "'Image'", ",", "'Title'", ":", "title", ",", "'Description'", ":", "desc", ",", "'Plot File...
Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however and it's independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element
[ "Builds", "an", "image", "element", ".", "Image", "elements", "are", "primarily", "created", "and", "then", "wrapped", "into", "an", "image", "gallery", "element", ".", "This", "is", "not", "required", "behavior", "however", "and", "it", "s", "independent", ...
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/util/elements.py#L261-L294
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/execute.py
Execute.commands
def commands(self):
    """
    Fetch individual SQL commands from a SQL script containing many commands.

    :return: List of commands with any 'dbo.' table-name prefixes removed
    """
    # Retrieve all commands via split function or splitting on ';'
    print('\tRetrieving commands from', self.sql_script)
    print('\tUsing command splitter algorithm {0}'.format(self.split_algo))
    with Timer('\tRetrieved commands in'):
        splitter = SplitCommands(self.sql_script)
        # BUG FIX: the original compared strings with `is`, which tests
        # object identity rather than equality and only worked by accident
        # of CPython string interning (SyntaxWarning since Python 3.8).
        # sqlparse package's split function combined with sql_split function
        if self.split_algo == 'sql_parse':
            commands = splitter.sql_parse
        # Split on every ';' (unreliable)
        elif self.split_algo == 'simple_split':
            commands = splitter.simple_split()
        # sqlparse package without additional splitting
        elif self.split_algo == 'sql_parse_nosplit':
            commands = splitter.sql_parse_nosplit
        # Parse every char of the SQL commands and determine breakpoints
        elif self.split_algo == 'sql_split':
            commands = splitter.sql_split(disable_tqdm=False)
        else:
            commands = splitter.sql_split(disable_tqdm=False)

        # Remove dbo. prefixes from table names
        cleaned_commands = [com.replace("dbo.", '') for com in commands]
    return cleaned_commands
python
def commands(self): """ Fetch individual SQL commands from a SQL commands containing many commands. :return: List of commands """ # Retrieve all commands via split function or splitting on ';' print('\tRetrieving commands from', self.sql_script) print('\tUsing command splitter algorithm {0}'.format(self.split_algo)) with Timer('\tRetrieved commands in'): # Split commands # sqlparse packages split function combined with sql_split function if self.split_algo is 'sql_parse': commands = SplitCommands(self.sql_script).sql_parse # Split on every ';' (unreliable) elif self.split_algo is 'simple_split': commands = SplitCommands(self.sql_script).simple_split() # sqlparse package without additional splitting elif self.split_algo is 'sql_parse_nosplit': commands = SplitCommands(self.sql_script).sql_parse_nosplit # Parse every char of the SQL commands and determine breakpoints elif self.split_algo is 'sql_split': commands = SplitCommands(self.sql_script).sql_split(disable_tqdm=False) else: commands = SplitCommands(self.sql_script).sql_split(disable_tqdm=False) # remove dbo. prefixes from table names cleaned_commands = [com.replace("dbo.", '') for com in commands] return cleaned_commands
[ "def", "commands", "(", "self", ")", ":", "# Retrieve all commands via split function or splitting on ';'", "print", "(", "'\\tRetrieving commands from'", ",", "self", ".", "sql_script", ")", "print", "(", "'\\tUsing command splitter algorithm {0}'", ".", "format", "(", "se...
Fetch individual SQL commands from a SQL commands containing many commands. :return: List of commands
[ "Fetch", "individual", "SQL", "commands", "from", "a", "SQL", "commands", "containing", "many", "commands", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L43-L75
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/execute.py
Execute.execute
def execute(self, commands=None, ignored_commands=('DROP', 'UNLOCK', 'LOCK'), execute_fails=True, max_executions=MAX_EXECUTION_ATTEMPTS):
    """
    Sequentially execute a list of SQL commands.

    Check if commands property has already been fetched, if so use the
    fetched_commands rather than getting them again.

    :param commands: List of SQL commands (fetched from sql_script if None)
    :param ignored_commands: Iterable of statement prefixes (e.g. 'DROP') to skip, or None
    :param execute_fails: Boolean, attempt to execute failed commands again
    :param max_executions: Int, max number of attempted executions
    :return: Tuple of (failed commands, count of successful commands)
    """
    # Break connection while commands are prepared
    self._MySQL.disconnect()
    self._execute_iters += 1
    if self._execute_iters > 0:
        print('\tExecuting commands attempt #{0}'.format(self._execute_iters))

    # Retrieve commands from sql_script if no commands are provided
    commands = self.commands if not commands else commands

    # Remove statements beginning with any of the ignored prefixes
    if ignored_commands:
        commands = filter_commands(commands, ignored_commands)

    # Reestablish connection
    self._MySQL.reconnect()

    # Execute list of commands
    fail, success = self._execute_commands(commands)

    # Dump failed commands to text files
    print('\t' + str(success), 'successful commands')
    # BUG FIX: the original tested `len(fail) > 1`, which silently skipped
    # the dump/retry path when exactly one command failed.
    if len(fail) > 0 and self._dump_fails:
        # Dump failed commands
        dump_dir = self.dump_commands(fail)

        # Execute failed commands again, up to max_executions attempts
        if execute_fails and self._execute_iters < max_executions:
            return self._execute_commands_from_dir(dump_dir)
    return fail, success
python
def execute(self, commands=None, ignored_commands=('DROP', 'UNLOCK', 'LOCK'), execute_fails=True, max_executions=MAX_EXECUTION_ATTEMPTS): """ Sequentially execute a list of SQL commands. Check if commands property has already been fetched, if so use the fetched_commands rather than getting them again. :param commands: List of SQL commands :param ignored_commands: Boolean, skip SQL commands that begin with 'DROP' :param execute_fails: Boolean, attempt to execute failed commands again :param max_executions: Int, max number of attempted executions :return: Successful and failed commands """ # Break connection self._MySQL.disconnect() self._execute_iters += 1 if self._execute_iters > 0: print('\tExecuting commands attempt #{0}'.format(self._execute_iters)) # Retrieve commands from sql_script if no commands are provided commands = self.commands if not commands else commands # Remove 'DROP' commands if ignored_commands: commands = filter_commands(commands, ignored_commands) # Reestablish connection self._MySQL.reconnect() # Execute list of commands fail, success = self._execute_commands(commands) # Dump failed commands to text files print('\t' + str(success), 'successful commands') if len(fail) > 1 and self._dump_fails: # Dump failed commands dump_dir = self.dump_commands(fail) # Execute failed commands if execute_fails and self._execute_iters < max_executions: return self._execute_commands_from_dir(dump_dir) return fail, success
[ "def", "execute", "(", "self", ",", "commands", "=", "None", ",", "ignored_commands", "=", "(", "'DROP'", ",", "'UNLOCK'", ",", "'LOCK'", ")", ",", "execute_fails", "=", "True", ",", "max_executions", "=", "MAX_EXECUTION_ATTEMPTS", ")", ":", "# Break connectio...
Sequentially execute a list of SQL commands. Check if commands property has already been fetched, if so use the fetched_commands rather than getting them again. :param commands: List of SQL commands :param ignored_commands: Boolean, skip SQL commands that begin with 'DROP' :param execute_fails: Boolean, attempt to execute failed commands again :param max_executions: Int, max number of attempted executions :return: Successful and failed commands
[ "Sequentially", "execute", "a", "list", "of", "SQL", "commands", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L77-L119
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/execute.py
Execute._execute_commands
def _execute_commands(self, commands, fails=False):
    """
    Execute commands and get list of failed commands and count of successful commands.

    :param commands: Iterable of SQL command strings
    :param fails: Boolean, True when re-running previously failed commands
        (only changes the progress-bar label)
    :return: Tuple of (failed commands, count of successful commands)
    """
    # Confirm that prepare_statements flag is on
    if self._prep_statements:
        prepared_commands = [prepare_sql(c) for c in tqdm(commands, total=len(commands), desc='Prepping SQL Commands')]
        print('\tCommands prepared', len(prepared_commands))
    else:
        prepared_commands = commands

    desc = 'Executing SQL Commands' if not fails else 'Executing Failed SQL Commands'
    fail, success = [], 0
    for command in tqdm(prepared_commands, total=len(prepared_commands), desc=desc):
        # Attempt to execute command and skip command if error is raised
        try:
            self._MySQL.executemore(command)
            success += 1
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the loop uninterruptible.
        except Exception:
            fail.append(command)
    self._MySQL._commit()
    return fail, success
python
def _execute_commands(self, commands, fails=False): """Execute commands and get list of failed commands and count of successful commands""" # Confirm that prepare_statements flag is on if self._prep_statements: prepared_commands = [prepare_sql(c) for c in tqdm(commands, total=len(commands), desc='Prepping SQL Commands')] print('\tCommands prepared', len(prepared_commands)) else: prepared_commands = commands desc = 'Executing SQL Commands' if not fails else 'Executing Failed SQL Commands' fail, success = [], 0 for command in tqdm(prepared_commands, total=len(prepared_commands), desc=desc): # Attempt to execute command and skip command if error is raised try: self._MySQL.executemore(command) success += 1 except: fail.append(command) self._MySQL._commit() return fail, success
[ "def", "_execute_commands", "(", "self", ",", "commands", ",", "fails", "=", "False", ")", ":", "# Confirm that prepare_statements flag is on", "if", "self", ".", "_prep_statements", ":", "prepared_commands", "=", "[", "prepare_sql", "(", "c", ")", "for", "c", "...
Execute commands and get list of failed commands and count of successful commands
[ "Execute", "commands", "and", "get", "list", "of", "failed", "commands", "and", "count", "of", "successful", "commands" ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L121-L141
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/execute.py
Execute._execute_commands_from_dir
def _execute_commands_from_dir(self, directory):
    """
    Re-attempt to split and execute the failed commands.

    :param directory: Directory containing dumped failed-command files
    :return: Result of re-running `execute` on the recovered commands
    """
    # Recover the previously dumped commands from their files
    failed_commands = get_commands_from_dir(directory)

    # Execute failed commands again
    print('\tAttempting to execute {0} failed commands'.format(len(failed_commands)))
    return self.execute(failed_commands, ignored_commands=None, execute_fails=True)
python
def _execute_commands_from_dir(self, directory): """Re-attempt to split and execute the failed commands""" # Get file paths and contents commands = get_commands_from_dir(directory) # Execute failed commands again print('\tAttempting to execute {0} failed commands'.format(len(commands))) return self.execute(commands, ignored_commands=None, execute_fails=True)
[ "def", "_execute_commands_from_dir", "(", "self", ",", "directory", ")", ":", "# Get file paths and contents", "commands", "=", "get_commands_from_dir", "(", "directory", ")", "# Execute failed commands again", "print", "(", "'\\tAttempting to execute {0} failed commands'", "."...
Re-attempt to split and execute the failed commands
[ "Re", "-", "attempt", "to", "split", "and", "execute", "the", "failed", "commands" ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L143-L150
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/execute.py
Execute.dump_commands
def dump_commands(self, commands):
    """
    Dump commands wrapper for external access.

    :param commands: List of SQL commands to dump
    :return: Result of the module-level `dump_commands` helper
    """
    # Get base directory
    directory = os.path.join(os.path.dirname(self.sql_script), 'fails')

    # Get file name to be used for folder name.
    # BUG FIX: the original used `rsplit('.')[0]`, which splits on EVERY
    # dot, so a dotted directory (e.g. 'v1.2/script.sql') truncated the
    # name to 'v1'; splitext strips only the final extension.
    fname = os.path.splitext(os.path.basename(self.sql_script))[0]
    return dump_commands(commands, directory, fname)
python
def dump_commands(self, commands): """Dump commands wrapper for external access.""" # Get base directory directory = os.path.join(os.path.dirname(self.sql_script), 'fails') # Get file name to be used for folder name fname = os.path.basename(self.sql_script.rsplit('.')[0]) return dump_commands(commands, directory, fname)
[ "def", "dump_commands", "(", "self", ",", "commands", ")", ":", "# Get base directory", "directory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "sql_script", ")", ",", "'fails'", ")", "# Get file name to ...
Dump commands wrapper for external access.
[ "Dump", "commands", "wrapper", "for", "external", "access", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/execute.py#L152-L160
markbaas/python-iresolve
iresolve.py
suppress_output
def suppress_output(reverse=False):
    """
    Suppress (or restore) stdout/stderr.

    Args:
        reverse: When True, restore the original streams; otherwise
            redirect both streams to the null device.
    """
    if reverse:
        # Close the devnull handle we opened, if any, before restoring.
        if sys.stdout is not sys.__stdout__ and hasattr(sys.stdout, 'close'):
            try:
                sys.stdout.close()
            except Exception:
                pass
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    else:
        # BUG FIX: the original assigned the *string* os.devnull to the
        # streams, so any subsequent print/write raised AttributeError
        # (str has no .write). A real file object is required.
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        sys.stderr = devnull
python
def suppress_output(reverse=False): """ Suppress output """ if reverse: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ else: sys.stdout = os.devnull sys.stderr = os.devnull
[ "def", "suppress_output", "(", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "sys", ".", "stdout", "=", "sys", ".", "__stdout__", "sys", ".", "stderr", "=", "sys", ".", "__stderr__", "else", ":", "sys", ".", "stdout", "=", "os", ".", "devn...
Suppress output
[ "Suppress", "output" ]
train
https://github.com/markbaas/python-iresolve/blob/ba91e37221e91265e4ac5dbc6e8f5cffa955a04f/iresolve.py#L37-L46
markbaas/python-iresolve
iresolve.py
get_unresolved_variables
def get_unresolved_variables(f):
    """
    Gets unresolved vars from file.

    Args:
        f: Path of the file to check

    Returns:
        dict built from the reporter's collected messages
    """
    rep = RReporter()
    checkPath(f, reporter=rep)
    return {key: value for key, value in rep.messages}
python
def get_unresolved_variables(f): """ Gets unresolved vars from file """ reporter = RReporter() checkPath(f, reporter=reporter) return dict(reporter.messages)
[ "def", "get_unresolved_variables", "(", "f", ")", ":", "reporter", "=", "RReporter", "(", ")", "checkPath", "(", "f", ",", "reporter", "=", "reporter", ")", "return", "dict", "(", "reporter", ".", "messages", ")" ]
Gets unresolved vars from file
[ "Gets", "unresolved", "vars", "from", "file" ]
train
https://github.com/markbaas/python-iresolve/blob/ba91e37221e91265e4ac5dbc6e8f5cffa955a04f/iresolve.py#L49-L55
markbaas/python-iresolve
iresolve.py
index_modules
def index_modules(idx=None, path=None):
    """
    Index objects from all importable modules.

    Args:
        idx: Existing index dict to merge the results into (may be None)
        path: Optional list of paths to restrict the package walk to

    Returns:
        The merged index mapping object name -> list of module names
    """
    suppress_output()
    modules = defaultdict(list)

    # BUG FIX: the original always walked all packages first (and printed
    # the generator — leftover debug output) before optionally walking
    # `path`; walk once with the right arguments instead.
    if path:
        pkglist = pkgutil.walk_packages(path, onerror=lambda x: True)
    else:
        pkglist = pkgutil.walk_packages(onerror=lambda x: True)

    for modl, name, ispkg in pkglist:
        try:
            # Use a distinct local name — the original clobbered the
            # `path` parameter here, breaking later iterations' intent.
            mod_path = os.path.join(modl.path, name.split('.')[-1])
        except AttributeError:
            # Triggered on zipimport.zipimporter
            continue

        if os.path.isdir(mod_path):
            mod_path = os.path.join(mod_path, '__init__')
        mod_path += '.py'

        objs = []
        if os.path.exists(mod_path):
            try:
                objs = read_objs_from_path(mod_path)
            # Narrowed from a bare `except:` so Ctrl-C still works.
            except Exception:
                continue
        elif not re.search(MODULE_BLACKLIST, name):
            try:
                mod = __import__(name)
                objs = [k for k in dir(mod) if not k.startswith('__')]
            # NOTE(review): some modules raise SystemExit on import; the
            # original bare except caught that too — confirm Exception
            # is broad enough in practice.
            except Exception:
                continue
        else:
            continue

        for obj in objs:
            if name not in modules[obj]:
                modules[obj].append(name)

    suppress_output(True)
    return merge_dicts(idx, dict(modules))
python
def index_modules(idx=None, path=None): """ Indexes objs from all modules """ suppress_output() modules = defaultdict(list) pkglist = pkgutil.walk_packages(onerror=lambda x: True) print(pkglist) if path: pkglist = pkgutil.walk_packages(path, onerror=lambda x: True) for modl, name, ispkg in pkglist: try: path = os.path.join(modl.path, name.split('.')[-1]) except AttributeError: # Triggered on zipimport.zipimporter continue if os.path.isdir(path): path = os.path.join(path, '__init__') path += '.py' objs = [] if os.path.exists(path): try: objs = read_objs_from_path(path) except: continue elif not re.search(MODULE_BLACKLIST, name): try: mod = __import__(name) objs = [k for k in dir(mod) if not k.startswith('__')] except: continue else: continue for obj in objs: if name not in modules[obj]: modules[obj].append(name) suppress_output(True) return merge_dicts(idx, dict(modules))
[ "def", "index_modules", "(", "idx", "=", "None", ",", "path", "=", "None", ")", ":", "suppress_output", "(", ")", "modules", "=", "defaultdict", "(", "list", ")", "pkglist", "=", "pkgutil", ".", "walk_packages", "(", "onerror", "=", "lambda", "x", ":", ...
Indexes objs from all modules
[ "Indexes", "objs", "from", "all", "modules" ]
train
https://github.com/markbaas/python-iresolve/blob/ba91e37221e91265e4ac5dbc6e8f5cffa955a04f/iresolve.py#L67-L108
markbaas/python-iresolve
iresolve.py
get_suggestions
def get_suggestions(idx, unresolved):
    """
    Returns suggestions.

    Args:
        idx: Index mapping object name -> list of module paths
        unresolved: Mapping of unresolved name -> line numbers

    Returns:
        dict of name -> {'paths': ..., 'lineno': ...} for names found in idx
    """
    return {
        name: {'paths': idx[name], 'lineno': lines}
        for name, lines in unresolved.items()
        if idx.get(name)
    }
python
def get_suggestions(idx, unresolved): """ Returns suggestions """ result = {} for u, lines in unresolved.items(): paths = idx.get(u) if paths: result[u] = {'paths': paths, 'lineno': lines} return result
[ "def", "get_suggestions", "(", "idx", ",", "unresolved", ")", ":", "result", "=", "{", "}", "for", "u", ",", "lines", "in", "unresolved", ".", "items", "(", ")", ":", "paths", "=", "idx", ".", "get", "(", "u", ")", "if", "paths", ":", "result", "...
Returns suggestions
[ "Returns", "suggestions" ]
train
https://github.com/markbaas/python-iresolve/blob/ba91e37221e91265e4ac5dbc6e8f5cffa955a04f/iresolve.py#L124-L133
20c/vodka
vodka/app.py
WebApplication.includes
def includes(self):
    """
    Return includes from config, each group sorted by its "order" key,
    with paths rewritten to versioned URLs when a version is set.
    """
    result = {}
    for key, entries in self.get_config("includes").items():
        ordered = sorted(copy.deepcopy(entries).values(), key=lambda item: item.get("order", 0))
        result[key] = ordered

    if self.version is not None:
        for entries in result.values():
            for entry in entries:
                entry["path"] = self.versioned_url(entry["path"])
    return result
python
def includes(self): """ return includes from config """ r = dict([(k, sorted(copy.deepcopy(v).values(), key=lambda x:x.get("order",0))) for k,v in self.get_config("includes").items()]) if self.version is not None: for k,v in r.items(): for j in v: j["path"] = self.versioned_url(j["path"]) return r
[ "def", "includes", "(", "self", ")", ":", "r", "=", "dict", "(", "[", "(", "k", ",", "sorted", "(", "copy", ".", "deepcopy", "(", "v", ")", ".", "values", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "get", "(", "\"order\"", ",", ...
return includes from config
[ "return", "includes", "from", "config" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/app.py#L286-L293
20c/vodka
vodka/app.py
WebApplication.render
def render(self, tmpl_name, request_env):
    """
    Render the specified template and return the output.

    Args:
        tmpl_name (str): file name of the template
        request_env (dict): request environment

    Returns:
        str - the rendered template
    """
    # Delegate straight to the base class implementation.
    return super(WebApplication, self).render(tmpl_name, request_env)
python
def render(self, tmpl_name, request_env): """ Render the specified template and return the output. Args: tmpl_name (str): file name of the template request_env (dict): request environment Returns: str - the rendered template """ return super(WebApplication, self).render(tmpl_name, request_env)
[ "def", "render", "(", "self", ",", "tmpl_name", ",", "request_env", ")", ":", "return", "super", "(", "WebApplication", ",", "self", ")", ".", "render", "(", "tmpl_name", ",", "request_env", ")" ]
Render the specified template and return the output. Args: tmpl_name (str): file name of the template request_env (dict): request environment Returns: str - the rendered template
[ "Render", "the", "specified", "template", "and", "return", "the", "output", "." ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/app.py#L296-L308
PGower/PyCanvas
pycanvas/apis/submissions.py
submit_assignment_courses
def submit_assignment_courses(self, course_id, assignment_id, submission_submission_type, comment_text_comment=None, submission_body=None, submission_file_ids=None, submission_media_comment_id=None, submission_media_comment_type=None, submission_url=None):
    """
    Submit an assignment.

    Make a submission for an assignment. You must be enrolled as a student in
    the course/section to do this. All online turn-in submission types are
    supported in this API. However, there are a few things that are not yet
    supported:

    * Files can be submitted based on a file ID of a user or group file,
      but there is no API yet for listing user/group files or uploading
      new files via the API.
    * Media comments can be submitted, but there is no API yet for creating
      a media comment to submit.
    * Integration with Google Docs is not yet supported.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED path parameters
    path["course_id"] = course_id
    path["assignment_id"] = assignment_id

    # OPTIONAL - comment[text_comment]: textual comment with the submission
    if comment_text_comment is not None:
        data["comment[text_comment]"] = comment_text_comment

    # REQUIRED - submission[submission_type]: must be one of the allowed
    # options, or the submission is rejected with a 400 error; determines
    # which of the remaining parameters is actually used.
    self._validate_enum(submission_submission_type, ["online_text_entry", "online_url", "online_upload", "media_recording", "basic_lti_launch"])
    data["submission[submission_type]"] = submission_submission_type

    # OPTIONAL - submission[body]: HTML snippet, sanitized server-side;
    # requires submission_type "online_text_entry".
    if submission_body is not None:
        data["submission[body]"] = submission_body

    # OPTIONAL - submission[url]: http/https URL; requires submission_type
    # "online_url" or "basic_lti_launch".
    if submission_url is not None:
        data["submission[url]"] = submission_url

    # OPTIONAL - submission[file_ids]: previously uploaded file ids;
    # requires submission_type "online_upload".
    if submission_file_ids is not None:
        data["submission[file_ids]"] = submission_file_ids

    # OPTIONAL - submission[media_comment_id]: requires submission_type
    # "media_recording".
    if submission_media_comment_id is not None:
        data["submission[media_comment_id]"] = submission_media_comment_id

    # OPTIONAL - submission[media_comment_type]: "audio" or "video"
    if submission_media_comment_type is not None:
        self._validate_enum(submission_media_comment_type, ["audio", "video"])
        data["submission[media_comment_type]"] = submission_media_comment_type

    self.logger.debug("POST /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, no_data=True)
python
def submit_assignment_courses(self, course_id, assignment_id, submission_submission_type, comment_text_comment=None, submission_body=None, submission_file_ids=None, submission_media_comment_id=None, submission_media_comment_type=None, submission_url=None): """ Submit an assignment. Make a submission for an assignment. You must be enrolled as a student in the course/section to do this. All online turn-in submission types are supported in this API. However, there are a few things that are not yet supported: * Files can be submitted based on a file ID of a user or group file. However, there is no API yet for listing the user and group files, or uploading new files via the API. A file upload API is coming soon. * Media comments can be submitted, however, there is no API yet for creating a media comment to submit. * Integration with Google Docs is not yet supported. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - comment[text_comment] """Include a textual comment with the submission.""" if comment_text_comment is not None: data["comment[text_comment]"] = comment_text_comment # REQUIRED - submission[submission_type] """The type of submission being made. The assignment submission_types must include this submission type as an allowed option, or the submission will be rejected with a 400 error. The submission_type given determines which of the following parameters is used. For instance, to submit a URL, submission [submission_type] must be set to "online_url", otherwise the submission [url] parameter will be ignored.""" self._validate_enum(submission_submission_type, ["online_text_entry", "online_url", "online_upload", "media_recording", "basic_lti_launch"]) data["submission[submission_type]"] = submission_submission_type # OPTIONAL - submission[body] """Submit the assignment as an HTML document snippet. 
Note this HTML snippet will be sanitized using the same ruleset as a submission made from the Canvas web UI. The sanitized HTML will be returned in the response as the submission body. Requires a submission_type of "online_text_entry".""" if submission_body is not None: data["submission[body]"] = submission_body # OPTIONAL - submission[url] """Submit the assignment as a URL. The URL scheme must be "http" or "https", no "ftp" or other URL schemes are allowed. If no scheme is given (e.g. "www.example.com") then "http" will be assumed. Requires a submission_type of "online_url" or "basic_lti_launch".""" if submission_url is not None: data["submission[url]"] = submission_url # OPTIONAL - submission[file_ids] """Submit the assignment as a set of one or more previously uploaded files residing in the submitting user's files section (or the group's files section, for group assignments). To upload a new file to submit, see the submissions {api:SubmissionsApiController#create_file Upload a file API}. Requires a submission_type of "online_upload".""" if submission_file_ids is not None: data["submission[file_ids]"] = submission_file_ids # OPTIONAL - submission[media_comment_id] """The media comment id to submit. Media comment ids can be submitted via this API, however, note that there is not yet an API to generate or list existing media comments, so this functionality is currently of limited use. 
Requires a submission_type of "media_recording".""" if submission_media_comment_id is not None: data["submission[media_comment_id]"] = submission_media_comment_id # OPTIONAL - submission[media_comment_type] """The type of media comment being submitted.""" if submission_media_comment_type is not None: self._validate_enum(submission_media_comment_type, ["audio", "video"]) data["submission[media_comment_type]"] = submission_media_comment_type self.logger.debug("POST /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, no_data=True)
[ "def", "submit_assignment_courses", "(", "self", ",", "course_id", ",", "assignment_id", ",", "submission_submission_type", ",", "comment_text_comment", "=", "None", ",", "submission_body", "=", "None", ",", "submission_file_ids", "=", "None", ",", "submission_media_com...
Submit an assignment. Make a submission for an assignment. You must be enrolled as a student in the course/section to do this. All online turn-in submission types are supported in this API. However, there are a few things that are not yet supported: * Files can be submitted based on a file ID of a user or group file. However, there is no API yet for listing the user and group files, or uploading new files via the API. A file upload API is coming soon. * Media comments can be submitted, however, there is no API yet for creating a media comment to submit. * Integration with Google Docs is not yet supported.
[ "Submit", "an", "assignment", ".", "Make", "a", "submission", "for", "an", "assignment", ".", "You", "must", "be", "enrolled", "as", "a", "student", "in", "the", "course", "/", "section", "to", "do", "this", ".", "All", "online", "turn", "-", "in", "su...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L19-L104
PGower/PyCanvas
pycanvas/apis/submissions.py
list_assignment_submissions_courses
def list_assignment_submissions_courses(self, course_id, assignment_id, grouped=None, include=None): """ List assignment submissions. Get all existing submissions for an assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - include """Associations to include with the group. "group" will add group_id and group_name.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "visibility", "course", "user", "group"]) params["include"] = include # OPTIONAL - grouped """If this argument is true, the response will be grouped by student groups.""" if grouped is not None: params["grouped"] = grouped self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
python
def list_assignment_submissions_courses(self, course_id, assignment_id, grouped=None, include=None): """ List assignment submissions. Get all existing submissions for an assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - include """Associations to include with the group. "group" will add group_id and group_name.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "visibility", "course", "user", "group"]) params["include"] = include # OPTIONAL - grouped """If this argument is true, the response will be grouped by student groups.""" if grouped is not None: params["grouped"] = grouped self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_assignment_submissions_courses", "(", "self", ",", "course_id", ",", "assignment_id", ",", "grouped", "=", "None", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH...
List assignment submissions. Get all existing submissions for an assignment.
[ "List", "assignment", "submissions", ".", "Get", "all", "existing", "submissions", "for", "an", "assignment", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L193-L223
PGower/PyCanvas
pycanvas/apis/submissions.py
list_assignment_submissions_sections
def list_assignment_submissions_sections(self, section_id, assignment_id, grouped=None, include=None): """ List assignment submissions. Get all existing submissions for an assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - include """Associations to include with the group. "group" will add group_id and group_name.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "visibility", "course", "user", "group"]) params["include"] = include # OPTIONAL - grouped """If this argument is true, the response will be grouped by student groups.""" if grouped is not None: params["grouped"] = grouped self.logger.debug("GET /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
python
def list_assignment_submissions_sections(self, section_id, assignment_id, grouped=None, include=None): """ List assignment submissions. Get all existing submissions for an assignment. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # OPTIONAL - include """Associations to include with the group. "group" will add group_id and group_name.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "visibility", "course", "user", "group"]) params["include"] = include # OPTIONAL - grouped """If this argument is true, the response will be grouped by student groups.""" if grouped is not None: params["grouped"] = grouped self.logger.debug("GET /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_assignment_submissions_sections", "(", "self", ",", "section_id", ",", "assignment_id", ",", "grouped", "=", "None", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PA...
List assignment submissions. Get all existing submissions for an assignment.
[ "List", "assignment", "submissions", ".", "Get", "all", "existing", "submissions", "for", "an", "assignment", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L225-L255
PGower/PyCanvas
pycanvas/apis/submissions.py
list_submissions_for_multiple_assignments_courses
def list_submissions_for_multiple_assignments_courses(self, course_id, assignment_ids=None, grading_period_id=None, grouped=None, include=None, order=None, order_direction=None, student_ids=None): """ List submissions for multiple assignments. Get all existing submissions for a given set of students and assignments. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - student_ids """List of student ids to return submissions for. If this argument is omitted, return submissions for the calling user. Students may only list their own submissions. Observers may only list those of associated students. The special id "all" will return submissions for all students in the course/section as appropriate.""" if student_ids is not None: params["student_ids"] = student_ids # OPTIONAL - assignment_ids """List of assignments to return submissions for. If none are given, submissions for all assignments are returned.""" if assignment_ids is not None: params["assignment_ids"] = assignment_ids # OPTIONAL - grouped """If this argument is present, the response will be grouped by student, rather than a flat array of submissions.""" if grouped is not None: params["grouped"] = grouped # OPTIONAL - grading_period_id """The id of the grading period in which submissions are being requested (Requires the Multiple Grading Periods account feature turned on)""" if grading_period_id is not None: params["grading_period_id"] = grading_period_id # OPTIONAL - order """The order submissions will be returned in. Defaults to "id". Doesn't affect results for "grouped" mode.""" if order is not None: self._validate_enum(order, ["id", "graded_at"]) params["order"] = order # OPTIONAL - order_direction """Determines whether ordered results are retured in ascending or descending order. Defaults to "ascending". 
Doesn't affect results for "grouped" mode.""" if order_direction is not None: self._validate_enum(order_direction, ["ascending", "descending"]) params["order_direction"] = order_direction # OPTIONAL - include """Associations to include with the group. `total_scores` requires the `grouped` argument.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "total_scores", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/students/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/students/submissions".format(**path), data=data, params=params, no_data=True)
python
def list_submissions_for_multiple_assignments_courses(self, course_id, assignment_ids=None, grading_period_id=None, grouped=None, include=None, order=None, order_direction=None, student_ids=None): """ List submissions for multiple assignments. Get all existing submissions for a given set of students and assignments. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - student_ids """List of student ids to return submissions for. If this argument is omitted, return submissions for the calling user. Students may only list their own submissions. Observers may only list those of associated students. The special id "all" will return submissions for all students in the course/section as appropriate.""" if student_ids is not None: params["student_ids"] = student_ids # OPTIONAL - assignment_ids """List of assignments to return submissions for. If none are given, submissions for all assignments are returned.""" if assignment_ids is not None: params["assignment_ids"] = assignment_ids # OPTIONAL - grouped """If this argument is present, the response will be grouped by student, rather than a flat array of submissions.""" if grouped is not None: params["grouped"] = grouped # OPTIONAL - grading_period_id """The id of the grading period in which submissions are being requested (Requires the Multiple Grading Periods account feature turned on)""" if grading_period_id is not None: params["grading_period_id"] = grading_period_id # OPTIONAL - order """The order submissions will be returned in. Defaults to "id". Doesn't affect results for "grouped" mode.""" if order is not None: self._validate_enum(order, ["id", "graded_at"]) params["order"] = order # OPTIONAL - order_direction """Determines whether ordered results are retured in ascending or descending order. Defaults to "ascending". 
Doesn't affect results for "grouped" mode.""" if order_direction is not None: self._validate_enum(order_direction, ["ascending", "descending"]) params["order_direction"] = order_direction # OPTIONAL - include """Associations to include with the group. `total_scores` requires the `grouped` argument.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "total_scores", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/students/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/students/submissions".format(**path), data=data, params=params, no_data=True)
[ "def", "list_submissions_for_multiple_assignments_courses", "(", "self", ",", "course_id", ",", "assignment_ids", "=", "None", ",", "grading_period_id", "=", "None", ",", "grouped", "=", "None", ",", "include", "=", "None", ",", "order", "=", "None", ",", "order...
List submissions for multiple assignments. Get all existing submissions for a given set of students and assignments.
[ "List", "submissions", "for", "multiple", "assignments", ".", "Get", "all", "existing", "submissions", "for", "a", "given", "set", "of", "students", "and", "assignments", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L257-L320
PGower/PyCanvas
pycanvas/apis/submissions.py
get_single_submission_courses
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None): """ Get a single submission. Get a single submission, based on user id. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - include """Associations to include with the group.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
python
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None): """ Get a single submission. Get a single submission, based on user id. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - include """Associations to include with the group.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_submission_courses", "(", "self", ",", "user_id", ",", "course_id", ",", "assignment_id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\...
Get a single submission. Get a single submission, based on user id.
[ "Get", "a", "single", "submission", ".", "Get", "a", "single", "submission", "based", "on", "user", "id", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L387-L416
PGower/PyCanvas
pycanvas/apis/submissions.py
get_single_submission_sections
def get_single_submission_sections(self, user_id, section_id, assignment_id, include=None): """ Get a single submission. Get a single submission, based on user id. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - include """Associations to include with the group.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
python
def get_single_submission_sections(self, user_id, section_id, assignment_id, include=None): """ Get a single submission. Get a single submission, based on user id. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - include """Associations to include with the group.""" if include is not None: self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"]) params["include"] = include self.logger.debug("GET /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "get_single_submission_sections", "(", "self", ",", "user_id", ",", "section_id", ",", "assignment_id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - section_id\r", "...
Get a single submission. Get a single submission, based on user id.
[ "Get", "a", "single", "submission", ".", "Get", "a", "single", "submission", "based", "on", "user", "id", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L418-L447
PGower/PyCanvas
pycanvas/apis/submissions.py
upload_file_sections
def upload_file_sections(self, user_id, section_id, assignment_id): """ Upload a file. Upload a file to a submission. This API endpoint is the first step in uploading a file to a submission as a student. See the {file:file_uploads.html File Upload Documentation} for details on the file upload workflow. The final step of the file upload workflow will return the attachment data, including the new file id. The caller can then POST to submit the +online_upload+ assignment with these file ids. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("POST /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files".format(**path), data=data, params=params, no_data=True)
python
def upload_file_sections(self, user_id, section_id, assignment_id): """ Upload a file. Upload a file to a submission. This API endpoint is the first step in uploading a file to a submission as a student. See the {file:file_uploads.html File Upload Documentation} for details on the file upload workflow. The final step of the file upload workflow will return the attachment data, including the new file id. The caller can then POST to submit the +online_upload+ assignment with these file ids. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("POST /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files".format(**path), data=data, params=params, no_data=True)
[ "def", "upload_file_sections", "(", "self", ",", "user_id", ",", "section_id", ",", "assignment_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - section_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"section_...
Upload a file. Upload a file to a submission. This API endpoint is the first step in uploading a file to a submission as a student. See the {file:file_uploads.html File Upload Documentation} for details on the file upload workflow. The final step of the file upload workflow will return the attachment data, including the new file id. The caller can then POST to submit the +online_upload+ assignment with these file ids.
[ "Upload", "a", "file", ".", "Upload", "a", "file", "to", "a", "submission", ".", "This", "API", "endpoint", "is", "the", "first", "step", "in", "uploading", "a", "file", "to", "a", "submission", "as", "a", "student", ".", "See", "the", "{", "file", "...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L481-L511
PGower/PyCanvas
pycanvas/apis/submissions.py
grade_or_comment_on_submission_courses
def grade_or_comment_on_submission_courses(self, user_id, course_id, assignment_id, comment_file_ids=None, comment_group_comment=None, comment_media_comment_id=None, comment_media_comment_type=None, comment_text_comment=None, include_visibility=None, rubric_assessment=None, submission_excuse=None, submission_posted_grade=None): """ Grade or comment on a submission. Comment on and/or update the grading for a student's assignment submission. If any submission or rubric_assessment arguments are provided, the user must have permission to manage grades in the appropriate context (course or section). """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - comment[text_comment] """Add a textual comment to the submission.""" if comment_text_comment is not None: data["comment[text_comment]"] = comment_text_comment # OPTIONAL - comment[group_comment] """Whether or not this comment should be sent to the entire group (defaults to false). Ignored if this is not a group assignment or if no text_comment is provided.""" if comment_group_comment is not None: data["comment[group_comment]"] = comment_group_comment # OPTIONAL - comment[media_comment_id] """Add an audio/video comment to the submission. 
Media comments can be added via this API, however, note that there is not yet an API to generate or list existing media comments, so this functionality is currently of limited use.""" if comment_media_comment_id is not None: data["comment[media_comment_id]"] = comment_media_comment_id # OPTIONAL - comment[media_comment_type] """The type of media comment being added.""" if comment_media_comment_type is not None: self._validate_enum(comment_media_comment_type, ["audio", "video"]) data["comment[media_comment_type]"] = comment_media_comment_type # OPTIONAL - comment[file_ids] """Attach files to this comment that were previously uploaded using the Submission Comment API's files action""" if comment_file_ids is not None: data["comment[file_ids]"] = comment_file_ids # OPTIONAL - include[visibility] """Whether this assignment is visible to the owner of the submission""" if include_visibility is not None: data["include[visibility]"] = include_visibility # OPTIONAL - submission[posted_grade] """Assign a score to the submission, updating both the "score" and "grade" fields on the submission record. This parameter can be passed in a few different formats: points:: A floating point or integral value, such as "13.5". The grade will be interpreted directly as the score of the assignment. Values above assignment.points_possible are allowed, for awarding extra credit. percentage:: A floating point value appended with a percent sign, such as "40%". The grade will be interpreted as a percentage score on the assignment, where 100% == assignment.points_possible. Values above 100% are allowed, for awarding extra credit. letter grade:: A letter grade, following the assignment's defined letter grading scheme. For example, "A-". The resulting score will be the high end of the defined range for the letter grade. For instance, if "B" is defined as 86% to 84%, a letter grade of "B" will be worth 86%. 
The letter grade will be rejected if the assignment does not have a defined letter grading scheme. For more fine-grained control of scores, pass in points or percentage rather than the letter grade. "pass/complete/fail/incomplete":: A string value of "pass" or "complete" will give a score of 100%. "fail" or "incomplete" will give a score of 0. Note that assignments with grading_type of "pass_fail" can only be assigned a score of 0 or assignment.points_possible, nothing inbetween. If a posted_grade in the "points" or "percentage" format is sent, the grade will only be accepted if the grade equals one of those two values.""" if submission_posted_grade is not None: data["submission[posted_grade]"] = submission_posted_grade # OPTIONAL - submission[excuse] """Sets the "excused" status of an assignment.""" if submission_excuse is not None: data["submission[excuse]"] = submission_excuse # OPTIONAL - rubric_assessment """Assign a rubric assessment to this assignment submission. The sub-parameters here depend on the rubric for the assignment. The general format is, for each row in the rubric: The points awarded for this row. rubric_assessment[criterion_id][points] Comments to add for this row. 
rubric_assessment[criterion_id][comments] For example, if the assignment rubric is (in JSON format): !!!javascript [ { 'id': 'crit1', 'points': 10, 'description': 'Criterion 1', 'ratings': [ { 'description': 'Good', 'points': 10 }, { 'description': 'Poor', 'points': 3 } ] }, { 'id': 'crit2', 'points': 5, 'description': 'Criterion 2', 'ratings': [ { 'description': 'Complete', 'points': 5 }, { 'description': 'Incomplete', 'points': 0 } ] } ] Then a possible set of values for rubric_assessment would be: rubric_assessment[crit1][points]=3&rubric_assessment[crit2][points]=5&rubric_assessment[crit2][comments]=Well%20Done.""" if rubric_assessment is not None: data["rubric_assessment"] = rubric_assessment self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
python
def grade_or_comment_on_submission_courses(self, user_id, course_id, assignment_id, comment_file_ids=None, comment_group_comment=None, comment_media_comment_id=None, comment_media_comment_type=None, comment_text_comment=None, include_visibility=None, rubric_assessment=None, submission_excuse=None, submission_posted_grade=None): """ Grade or comment on a submission. Comment on and/or update the grading for a student's assignment submission. If any submission or rubric_assessment arguments are provided, the user must have permission to manage grades in the appropriate context (course or section). """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - comment[text_comment] """Add a textual comment to the submission.""" if comment_text_comment is not None: data["comment[text_comment]"] = comment_text_comment # OPTIONAL - comment[group_comment] """Whether or not this comment should be sent to the entire group (defaults to false). Ignored if this is not a group assignment or if no text_comment is provided.""" if comment_group_comment is not None: data["comment[group_comment]"] = comment_group_comment # OPTIONAL - comment[media_comment_id] """Add an audio/video comment to the submission. 
Media comments can be added via this API, however, note that there is not yet an API to generate or list existing media comments, so this functionality is currently of limited use.""" if comment_media_comment_id is not None: data["comment[media_comment_id]"] = comment_media_comment_id # OPTIONAL - comment[media_comment_type] """The type of media comment being added.""" if comment_media_comment_type is not None: self._validate_enum(comment_media_comment_type, ["audio", "video"]) data["comment[media_comment_type]"] = comment_media_comment_type # OPTIONAL - comment[file_ids] """Attach files to this comment that were previously uploaded using the Submission Comment API's files action""" if comment_file_ids is not None: data["comment[file_ids]"] = comment_file_ids # OPTIONAL - include[visibility] """Whether this assignment is visible to the owner of the submission""" if include_visibility is not None: data["include[visibility]"] = include_visibility # OPTIONAL - submission[posted_grade] """Assign a score to the submission, updating both the "score" and "grade" fields on the submission record. This parameter can be passed in a few different formats: points:: A floating point or integral value, such as "13.5". The grade will be interpreted directly as the score of the assignment. Values above assignment.points_possible are allowed, for awarding extra credit. percentage:: A floating point value appended with a percent sign, such as "40%". The grade will be interpreted as a percentage score on the assignment, where 100% == assignment.points_possible. Values above 100% are allowed, for awarding extra credit. letter grade:: A letter grade, following the assignment's defined letter grading scheme. For example, "A-". The resulting score will be the high end of the defined range for the letter grade. For instance, if "B" is defined as 86% to 84%, a letter grade of "B" will be worth 86%. 
The letter grade will be rejected if the assignment does not have a defined letter grading scheme. For more fine-grained control of scores, pass in points or percentage rather than the letter grade. "pass/complete/fail/incomplete":: A string value of "pass" or "complete" will give a score of 100%. "fail" or "incomplete" will give a score of 0. Note that assignments with grading_type of "pass_fail" can only be assigned a score of 0 or assignment.points_possible, nothing inbetween. If a posted_grade in the "points" or "percentage" format is sent, the grade will only be accepted if the grade equals one of those two values.""" if submission_posted_grade is not None: data["submission[posted_grade]"] = submission_posted_grade # OPTIONAL - submission[excuse] """Sets the "excused" status of an assignment.""" if submission_excuse is not None: data["submission[excuse]"] = submission_excuse # OPTIONAL - rubric_assessment """Assign a rubric assessment to this assignment submission. The sub-parameters here depend on the rubric for the assignment. The general format is, for each row in the rubric: The points awarded for this row. rubric_assessment[criterion_id][points] Comments to add for this row. 
rubric_assessment[criterion_id][comments] For example, if the assignment rubric is (in JSON format): !!!javascript [ { 'id': 'crit1', 'points': 10, 'description': 'Criterion 1', 'ratings': [ { 'description': 'Good', 'points': 10 }, { 'description': 'Poor', 'points': 3 } ] }, { 'id': 'crit2', 'points': 5, 'description': 'Criterion 2', 'ratings': [ { 'description': 'Complete', 'points': 5 }, { 'description': 'Incomplete', 'points': 0 } ] } ] Then a possible set of values for rubric_assessment would be: rubric_assessment[crit1][points]=3&rubric_assessment[crit2][points]=5&rubric_assessment[crit2][comments]=Well%20Done.""" if rubric_assessment is not None: data["rubric_assessment"] = rubric_assessment self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "grade_or_comment_on_submission_courses", "(", "self", ",", "user_id", ",", "course_id", ",", "assignment_id", ",", "comment_file_ids", "=", "None", ",", "comment_group_comment", "=", "None", ",", "comment_media_comment_id", "=", "None", ",", "comment_media_comme...
Grade or comment on a submission. Comment on and/or update the grading for a student's assignment submission. If any submission or rubric_assessment arguments are provided, the user must have permission to manage grades in the appropriate context (course or section).
[ "Grade", "or", "comment", "on", "a", "submission", ".", "Comment", "on", "and", "/", "or", "update", "the", "grading", "for", "a", "student", "s", "assignment", "submission", ".", "If", "any", "submission", "or", "rubric_assessment", "arguments", "are", "pro...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/submissions.py#L513-L653
klen/muffin-rest
muffin_rest/filters.py
Filter.filter
def filter(self, collection, data, **kwargs): """Filter given collection.""" ops = self.parse(data) collection = self.apply(collection, ops, **kwargs) return ops, collection
python
def filter(self, collection, data, **kwargs): """Filter given collection.""" ops = self.parse(data) collection = self.apply(collection, ops, **kwargs) return ops, collection
[ "def", "filter", "(", "self", ",", "collection", ",", "data", ",", "*", "*", "kwargs", ")", ":", "ops", "=", "self", ".", "parse", "(", "data", ")", "collection", "=", "self", ".", "apply", "(", "collection", ",", "ops", ",", "*", "*", "kwargs", ...
Filter given collection.
[ "Filter", "given", "collection", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L41-L45
klen/muffin-rest
muffin_rest/filters.py
Filter.parse
def parse(self, data): """Parse operator and value from filter's data.""" val = data.get(self.name, missing) if not isinstance(val, dict): return (self.operators['$eq'], self.field.deserialize(val)), return tuple( ( self.operators[op], (self.field.deserialize(val)) if op not in self.list_ops else [ self.field.deserialize(v) for v in val]) for (op, val) in val.items() if op in self.operators )
python
def parse(self, data): """Parse operator and value from filter's data.""" val = data.get(self.name, missing) if not isinstance(val, dict): return (self.operators['$eq'], self.field.deserialize(val)), return tuple( ( self.operators[op], (self.field.deserialize(val)) if op not in self.list_ops else [ self.field.deserialize(v) for v in val]) for (op, val) in val.items() if op in self.operators )
[ "def", "parse", "(", "self", ",", "data", ")", ":", "val", "=", "data", ".", "get", "(", "self", ".", "name", ",", "missing", ")", "if", "not", "isinstance", "(", "val", ",", "dict", ")", ":", "return", "(", "self", ".", "operators", "[", "'$eq'"...
Parse operator and value from filter's data.
[ "Parse", "operator", "and", "value", "from", "filter", "s", "data", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L47-L59
klen/muffin-rest
muffin_rest/filters.py
Filter.apply
def apply(self, collection, ops, **kwargs): """Apply the filter to collection.""" validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa return [o for o in collection if validator(o)]
python
def apply(self, collection, ops, **kwargs): """Apply the filter to collection.""" validator = lambda obj: all(op(obj, val) for (op, val) in ops) # noqa return [o for o in collection if validator(o)]
[ "def", "apply", "(", "self", ",", "collection", ",", "ops", ",", "*", "*", "kwargs", ")", ":", "validator", "=", "lambda", "obj", ":", "all", "(", "op", "(", "obj", ",", "val", ")", "for", "(", "op", ",", "val", ")", "in", "ops", ")", "# noqa",...
Apply the filter to collection.
[ "Apply", "the", "filter", "to", "collection", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L61-L64
klen/muffin-rest
muffin_rest/filters.py
Filters.convert
def convert(self, args, handler=None): """Prepare filters.""" name = args field = attr = None opts = () if isinstance(args, (list, tuple)): name, *opts = args if opts: attr = opts.pop() if opts: field = opts.pop() if not field and handler and handler.Schema: field = handler.Schema._declared_fields.get(attr or name) or \ self.FILTER_CLASS.field_cls() field.attribute = field.attribute or attr or name return self.FILTER_CLASS(name, attr=attr, field=field, *opts)
python
def convert(self, args, handler=None): """Prepare filters.""" name = args field = attr = None opts = () if isinstance(args, (list, tuple)): name, *opts = args if opts: attr = opts.pop() if opts: field = opts.pop() if not field and handler and handler.Schema: field = handler.Schema._declared_fields.get(attr or name) or \ self.FILTER_CLASS.field_cls() field.attribute = field.attribute or attr or name return self.FILTER_CLASS(name, attr=attr, field=field, *opts)
[ "def", "convert", "(", "self", ",", "args", ",", "handler", "=", "None", ")", ":", "name", "=", "args", "field", "=", "attr", "=", "None", "opts", "=", "(", ")", "if", "isinstance", "(", "args", ",", "(", "list", ",", "tuple", ")", ")", ":", "n...
Prepare filters.
[ "Prepare", "filters", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L78-L94
klen/muffin-rest
muffin_rest/filters.py
Filters.filter
def filter(self, data, collection, **kwargs): """Filter given collection.""" if not data or self.filters is None: return None, collection filters = {} for f in self.filters: if f.name not in data: continue ops, collection = f.filter(collection, data, **kwargs) filters[f.name] = ops return filters, collection
python
def filter(self, data, collection, **kwargs): """Filter given collection.""" if not data or self.filters is None: return None, collection filters = {} for f in self.filters: if f.name not in data: continue ops, collection = f.filter(collection, data, **kwargs) filters[f.name] = ops return filters, collection
[ "def", "filter", "(", "self", ",", "data", ",", "collection", ",", "*", "*", "kwargs", ")", ":", "if", "not", "data", "or", "self", ".", "filters", "is", "None", ":", "return", "None", ",", "collection", "filters", "=", "{", "}", "for", "f", "in", ...
Filter given collection.
[ "Filter", "given", "collection", "." ]
train
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L96-L108
theonion/django-bulbs
bulbs/content/south_migrations/0004_add_groups.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." PERM_CONF = { "publish_content": "Can publish content", "publish_own_content": "Can publish own content", "change_content": "Can change content", "promote_content": "Can promote content" } GROUP_CONF = dict( contributor=(), author=("publish_own_content",), editor=( "publish_content", "change_content", "promote_content", ), admin=( "publish_content", "change_content", "promote_content", ) ) content_ct, _ = orm["contenttypes.ContentType"].objects.get_or_create( model="content", app_label="content" ) for group_name, group_perms in GROUP_CONF.items(): group, _ = orm["auth.Group"].objects.get_or_create( name=group_name ) for perm_name in group_perms: perm, _ = orm["auth.Permission"].objects.get_or_create( content_type=content_ct, codename=perm_name, defaults={ "name": PERM_CONF[perm_name] } ) group.permissions.add(perm)
python
def forwards(self, orm): "Write your forwards methods here." PERM_CONF = { "publish_content": "Can publish content", "publish_own_content": "Can publish own content", "change_content": "Can change content", "promote_content": "Can promote content" } GROUP_CONF = dict( contributor=(), author=("publish_own_content",), editor=( "publish_content", "change_content", "promote_content", ), admin=( "publish_content", "change_content", "promote_content", ) ) content_ct, _ = orm["contenttypes.ContentType"].objects.get_or_create( model="content", app_label="content" ) for group_name, group_perms in GROUP_CONF.items(): group, _ = orm["auth.Group"].objects.get_or_create( name=group_name ) for perm_name in group_perms: perm, _ = orm["auth.Permission"].objects.get_or_create( content_type=content_ct, codename=perm_name, defaults={ "name": PERM_CONF[perm_name] } ) group.permissions.add(perm)
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "PERM_CONF", "=", "{", "\"publish_content\"", ":", "\"Can publish content\"", ",", "\"publish_own_content\"", ":", "\"Can publish own content\"", ",", "\"change_content\"", ":", "\"Can change content\"", ",", "\"prom...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/south_migrations/0004_add_groups.py#L9-L46
inveniosoftware/invenio-pages
examples/app.py
pages
def pages(): """Load pages.""" p1 = Page( url='/example1', title='My page with default template', description='my description', content='hello default page', template_name='invenio_pages/default.html', ) p2 = Page( url='/example2', title='My page with my template', description='my description', content='hello my page', template_name='app/mytemplate.html', ) with db.session.begin_nested(): db.session.add(p1) db.session.add(p2) db.session.commit()
python
def pages(): """Load pages.""" p1 = Page( url='/example1', title='My page with default template', description='my description', content='hello default page', template_name='invenio_pages/default.html', ) p2 = Page( url='/example2', title='My page with my template', description='my description', content='hello my page', template_name='app/mytemplate.html', ) with db.session.begin_nested(): db.session.add(p1) db.session.add(p2) db.session.commit()
[ "def", "pages", "(", ")", ":", "p1", "=", "Page", "(", "url", "=", "'/example1'", ",", "title", "=", "'My page with default template'", ",", "description", "=", "'my description'", ",", "content", "=", "'hello default page'", ",", "template_name", "=", "'invenio...
Load pages.
[ "Load", "pages", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/examples/app.py#L71-L90
PGower/PyCanvas
pycanvas/apis/poll_choices.py
PollChoicesAPI.list_poll_choices_in_poll
def list_poll_choices_in_poll(self, poll_id): """ List poll choices in a poll. Returns the list of PollChoices in this poll. """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id self.logger.debug("GET /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
python
def list_poll_choices_in_poll(self, poll_id): """ List poll choices in a poll. Returns the list of PollChoices in this poll. """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id self.logger.debug("GET /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
[ "def", "list_poll_choices_in_poll", "(", "self", ",", "poll_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - poll_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"poll_id\"", "]", "=", "poll_id", "self", ".",...
List poll choices in a poll. Returns the list of PollChoices in this poll.
[ "List", "poll", "choices", "in", "a", "poll", ".", "Returns", "the", "list", "of", "PollChoices", "in", "this", "poll", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/poll_choices.py#L19-L34
PGower/PyCanvas
pycanvas/apis/poll_choices.py
PollChoicesAPI.create_single_poll_choice
def create_single_poll_choice(self, poll_id, poll_choices_text, poll_choices_is_correct=None, poll_choices_position=None): """ Create a single poll choice. Create a new poll choice for this poll """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id # REQUIRED - poll_choices[text] """The descriptive text of the poll choice.""" data["poll_choices[text]"] = poll_choices_text # OPTIONAL - poll_choices[is_correct] """Whether this poll choice is considered correct or not. Defaults to false.""" if poll_choices_is_correct is not None: data["poll_choices[is_correct]"] = poll_choices_is_correct # OPTIONAL - poll_choices[position] """The order this poll choice should be returned in the context it's sibling poll choices.""" if poll_choices_position is not None: data["poll_choices[position]"] = poll_choices_position self.logger.debug("POST /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
python
def create_single_poll_choice(self, poll_id, poll_choices_text, poll_choices_is_correct=None, poll_choices_position=None): """ Create a single poll choice. Create a new poll choice for this poll """ path = {} data = {} params = {} # REQUIRED - PATH - poll_id """ID""" path["poll_id"] = poll_id # REQUIRED - poll_choices[text] """The descriptive text of the poll choice.""" data["poll_choices[text]"] = poll_choices_text # OPTIONAL - poll_choices[is_correct] """Whether this poll choice is considered correct or not. Defaults to false.""" if poll_choices_is_correct is not None: data["poll_choices[is_correct]"] = poll_choices_is_correct # OPTIONAL - poll_choices[position] """The order this poll choice should be returned in the context it's sibling poll choices.""" if poll_choices_position is not None: data["poll_choices[position]"] = poll_choices_position self.logger.debug("POST /api/v1/polls/{poll_id}/poll_choices with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/polls/{poll_id}/poll_choices".format(**path), data=data, params=params, no_data=True)
[ "def", "create_single_poll_choice", "(", "self", ",", "poll_id", ",", "poll_choices_text", ",", "poll_choices_is_correct", "=", "None", ",", "poll_choices_position", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}"...
Create a single poll choice. Create a new poll choice for this poll
[ "Create", "a", "single", "poll", "choice", ".", "Create", "a", "new", "poll", "choice", "for", "this", "poll" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/poll_choices.py#L57-L86
Adarnof/adarnauth-esi
esi/views.py
sso_redirect
def sso_redirect(request, scopes=list([]), return_to=None): """ Generates a :model:`esi.CallbackRedirect` for the specified request. Redirects to EVE for login. Accepts a view or URL name as a redirect after SSO. """ logger.debug("Initiating redirect of {0} session {1}".format(request.user, request.session.session_key[:5])) if isinstance(scopes, string_types): scopes = list([scopes]) # ensure only one callback redirect model per session CallbackRedirect.objects.filter(session_key=request.session.session_key).delete() # ensure session installed in database if not request.session.exists(request.session.session_key): logger.debug("Creating new session before redirect.") request.session.create() if return_to: url = reverse(return_to) else: url = request.get_full_path() oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL, scope=scopes) redirect_url, state = oauth.authorization_url(app_settings.ESI_OAUTH_LOGIN_URL) CallbackRedirect.objects.create(session_key=request.session.session_key, state=state, url=url) logger.debug("Redirecting {0} session {1} to SSO. Callback will be redirected to {2}".format(request.user, request.session.session_key[:5], url)) return redirect(redirect_url)
python
def sso_redirect(request, scopes=list([]), return_to=None): """ Generates a :model:`esi.CallbackRedirect` for the specified request. Redirects to EVE for login. Accepts a view or URL name as a redirect after SSO. """ logger.debug("Initiating redirect of {0} session {1}".format(request.user, request.session.session_key[:5])) if isinstance(scopes, string_types): scopes = list([scopes]) # ensure only one callback redirect model per session CallbackRedirect.objects.filter(session_key=request.session.session_key).delete() # ensure session installed in database if not request.session.exists(request.session.session_key): logger.debug("Creating new session before redirect.") request.session.create() if return_to: url = reverse(return_to) else: url = request.get_full_path() oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL, scope=scopes) redirect_url, state = oauth.authorization_url(app_settings.ESI_OAUTH_LOGIN_URL) CallbackRedirect.objects.create(session_key=request.session.session_key, state=state, url=url) logger.debug("Redirecting {0} session {1} to SSO. Callback will be redirected to {2}".format(request.user, request.session.session_key[:5], url)) return redirect(redirect_url)
[ "def", "sso_redirect", "(", "request", ",", "scopes", "=", "list", "(", "[", "]", ")", ",", "return_to", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Initiating redirect of {0} session {1}\"", ".", "format", "(", "request", ".", "user", ",", "req...
Generates a :model:`esi.CallbackRedirect` for the specified request. Redirects to EVE for login. Accepts a view or URL name as a redirect after SSO.
[ "Generates", "a", ":", "model", ":", "esi", ".", "CallbackRedirect", "for", "the", "specified", "request", ".", "Redirects", "to", "EVE", "for", "login", ".", "Accepts", "a", "view", "or", "URL", "name", "as", "a", "redirect", "after", "SSO", "." ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L16-L44
Adarnof/adarnauth-esi
esi/views.py
receive_callback
def receive_callback(request): """ Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url. """ logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5])) # make sure request has required parameters code = request.GET.get('code', None) state = request.GET.get('state', None) try: assert code assert state except AssertionError: logger.debug("Missing parameters for code exchange.") return HttpResponseBadRequest() callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key) token = Token.objects.create_from_request(request) callback.token = token callback.save() logger.debug( "Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url)) return redirect(callback.url)
python
def receive_callback(request): """ Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url. """ logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5])) # make sure request has required parameters code = request.GET.get('code', None) state = request.GET.get('state', None) try: assert code assert state except AssertionError: logger.debug("Missing parameters for code exchange.") return HttpResponseBadRequest() callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key) token = Token.objects.create_from_request(request) callback.token = token callback.save() logger.debug( "Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url)) return redirect(callback.url)
[ "def", "receive_callback", "(", "request", ")", ":", "logger", ".", "debug", "(", "\"Received callback for {0} session {1}\"", ".", "format", "(", "request", ".", "user", ",", "request", ".", "session", ".", "session_key", "[", ":", "5", "]", ")", ")", "# ma...
Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url.
[ "Parses", "SSO", "callback", "validates", "retrieves", ":", "model", ":", "esi", ".", "Token", "and", "internally", "redirects", "to", "the", "target", "url", "." ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L47-L68
Adarnof/adarnauth-esi
esi/views.py
select_token
def select_token(request, scopes='', new=False): """ Presents the user with a selection of applicable tokens for the requested view. """ @tokens_required(scopes=scopes, new=new) def _token_list(r, tokens): context = { 'tokens': tokens, 'base_template': app_settings.ESI_BASE_TEMPLATE, } return render(r, 'esi/select_token.html', context=context) return _token_list(request)
python
def select_token(request, scopes='', new=False): """ Presents the user with a selection of applicable tokens for the requested view. """ @tokens_required(scopes=scopes, new=new) def _token_list(r, tokens): context = { 'tokens': tokens, 'base_template': app_settings.ESI_BASE_TEMPLATE, } return render(r, 'esi/select_token.html', context=context) return _token_list(request)
[ "def", "select_token", "(", "request", ",", "scopes", "=", "''", ",", "new", "=", "False", ")", ":", "@", "tokens_required", "(", "scopes", "=", "scopes", ",", "new", "=", "new", ")", "def", "_token_list", "(", "r", ",", "tokens", ")", ":", "context"...
Presents the user with a selection of applicable tokens for the requested view.
[ "Presents", "the", "user", "with", "a", "selection", "of", "applicable", "tokens", "for", "the", "requested", "view", "." ]
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L71-L84
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
PrimaryKey.get_primary_key
def get_primary_key(self, table): """Retrieve the column which is the primary key for a table.""" for column in self.get_schema(table): if len(column) > 3 and 'pri' in column[3].lower(): return column[0]
python
def get_primary_key(self, table): """Retrieve the column which is the primary key for a table.""" for column in self.get_schema(table): if len(column) > 3 and 'pri' in column[3].lower(): return column[0]
[ "def", "get_primary_key", "(", "self", ",", "table", ")", ":", "for", "column", "in", "self", ".", "get_schema", "(", "table", ")", ":", "if", "len", "(", "column", ")", ">", "3", "and", "'pri'", "in", "column", "[", "3", "]", ".", "lower", "(", ...
Retrieve the column which is the primary key for a table.
[ "Retrieve", "the", "column", "which", "is", "the", "primary", "key", "for", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L9-L13
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
PrimaryKey.set_primary_key
def set_primary_key(self, table, column): """Create a Primary Key constraint on a specific column when the table is already created.""" self.execute('ALTER TABLE {0} ADD PRIMARY KEY ({1})'.format(wrap(table), column)) self._printer('\tAdded primary key to {0} on column {1}'.format(wrap(table), column))
python
def set_primary_key(self, table, column): """Create a Primary Key constraint on a specific column when the table is already created.""" self.execute('ALTER TABLE {0} ADD PRIMARY KEY ({1})'.format(wrap(table), column)) self._printer('\tAdded primary key to {0} on column {1}'.format(wrap(table), column))
[ "def", "set_primary_key", "(", "self", ",", "table", ",", "column", ")", ":", "self", ".", "execute", "(", "'ALTER TABLE {0} ADD PRIMARY KEY ({1})'", ".", "format", "(", "wrap", "(", "table", ")", ",", "column", ")", ")", "self", ".", "_printer", "(", "'\\...
Create a Primary Key constraint on a specific column when the table is already created.
[ "Create", "a", "Primary", "Key", "constraint", "on", "a", "specific", "column", "when", "the", "table", "is", "already", "created", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L15-L18
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
PrimaryKey.set_primary_keys_auto
def set_primary_keys_auto(self, tables=None): """ Create primary keys for every table in the connected database. Checks that each table has a primary key. If a table does not have a key then each column is analyzed to determine if it contains only unique values. If no columns exist containing only unique values then a new 'ID' column is created to serve as a auto_incrementing primary key. """ # Retrieve list of tables if not provided tables = tables if tables else self.tables # Resolve primary keys and return list of table, primary_key tuples return [(table, self.set_primary_key_auto(table)) for table in tables]
python
def set_primary_keys_auto(self, tables=None): """ Create primary keys for every table in the connected database. Checks that each table has a primary key. If a table does not have a key then each column is analyzed to determine if it contains only unique values. If no columns exist containing only unique values then a new 'ID' column is created to serve as a auto_incrementing primary key. """ # Retrieve list of tables if not provided tables = tables if tables else self.tables # Resolve primary keys and return list of table, primary_key tuples return [(table, self.set_primary_key_auto(table)) for table in tables]
[ "def", "set_primary_keys_auto", "(", "self", ",", "tables", "=", "None", ")", ":", "# Retrieve list of tables if not provided", "tables", "=", "tables", "if", "tables", "else", "self", ".", "tables", "# Resolve primary keys and return list of table, primary_key tuples", "re...
Create primary keys for every table in the connected database. Checks that each table has a primary key. If a table does not have a key then each column is analyzed to determine if it contains only unique values. If no columns exist containing only unique values then a new 'ID' column is created to serve as a auto_incrementing primary key.
[ "Create", "primary", "keys", "for", "every", "table", "in", "the", "connected", "database", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L20-L33
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
PrimaryKey.set_primary_key_auto
def set_primary_key_auto(self, table): """ Analysis a table and set a primary key. Determine primary key by identifying a column with unique values or creating a new column. :param table: Table to alter :return: Primary Key column """ # Confirm no primary key exists pk = self.get_primary_key(table) if not pk: # Determine if there is a unique column that can become the PK unique_col = self.get_unique_column(table) # Set primary key if unique_col: self.set_primary_key(table, unique_col) # Create unique 'ID' column else: unique_col = self.add_column(table, primary_key=True) return unique_col else: return pk
python
def set_primary_key_auto(self, table): """ Analysis a table and set a primary key. Determine primary key by identifying a column with unique values or creating a new column. :param table: Table to alter :return: Primary Key column """ # Confirm no primary key exists pk = self.get_primary_key(table) if not pk: # Determine if there is a unique column that can become the PK unique_col = self.get_unique_column(table) # Set primary key if unique_col: self.set_primary_key(table, unique_col) # Create unique 'ID' column else: unique_col = self.add_column(table, primary_key=True) return unique_col else: return pk
[ "def", "set_primary_key_auto", "(", "self", ",", "table", ")", ":", "# Confirm no primary key exists", "pk", "=", "self", ".", "get_primary_key", "(", "table", ")", "if", "not", "pk", ":", "# Determine if there is a unique column that can become the PK", "unique_col", "...
Analysis a table and set a primary key. Determine primary key by identifying a column with unique values or creating a new column. :param table: Table to alter :return: Primary Key column
[ "Analysis", "a", "table", "and", "set", "a", "primary", "key", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L35-L60
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
PrimaryKey.drop_primary_key
def drop_primary_key(self, table): """Drop a Primary Key constraint for a specific table.""" if self.get_primary_key(table): self.execute('ALTER TABLE {0} DROP PRIMARY KEY'.format(wrap(table)))
python
def drop_primary_key(self, table): """Drop a Primary Key constraint for a specific table.""" if self.get_primary_key(table): self.execute('ALTER TABLE {0} DROP PRIMARY KEY'.format(wrap(table)))
[ "def", "drop_primary_key", "(", "self", ",", "table", ")", ":", "if", "self", ".", "get_primary_key", "(", "table", ")", ":", "self", ".", "execute", "(", "'ALTER TABLE {0} DROP PRIMARY KEY'", ".", "format", "(", "wrap", "(", "table", ")", ")", ")" ]
Drop a Primary Key constraint for a specific table.
[ "Drop", "a", "Primary", "Key", "constraint", "for", "a", "specific", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L62-L65
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/keys.py
ForeignKey.set_foreign_key
def set_foreign_key(self, parent_table, parent_column, child_table, child_column): """Create a Foreign Key constraint on a column from a table.""" self.execute('ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'.format(parent_table, parent_column, child_table, child_column))
python
def set_foreign_key(self, parent_table, parent_column, child_table, child_column): """Create a Foreign Key constraint on a column from a table.""" self.execute('ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'.format(parent_table, parent_column, child_table, child_column))
[ "def", "set_foreign_key", "(", "self", ",", "parent_table", ",", "parent_column", ",", "child_table", ",", "child_column", ")", ":", "self", ".", "execute", "(", "'ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})'", ".", "format", "(", "parent_table", ",", "p...
Create a Foreign Key constraint on a column from a table.
[ "Create", "a", "Foreign", "Key", "constraint", "on", "a", "column", "from", "a", "table", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/keys.py#L69-L72
jcassee/django-geckoboard
django_geckoboard/decorators.py
_is_api_key_correct
def _is_api_key_correct(request): """Return whether the Geckoboard API key on the request is correct.""" api_key = getattr(settings, 'GECKOBOARD_API_KEY', None) if api_key is None: return True auth = request.META.get('HTTP_AUTHORIZATION', '').split() if len(auth) == 2: if auth[0].lower() == b'basic': request_key = base64.b64decode(auth[1]).split(b':')[0] return request_key == api_key return False
python
def _is_api_key_correct(request): """Return whether the Geckoboard API key on the request is correct.""" api_key = getattr(settings, 'GECKOBOARD_API_KEY', None) if api_key is None: return True auth = request.META.get('HTTP_AUTHORIZATION', '').split() if len(auth) == 2: if auth[0].lower() == b'basic': request_key = base64.b64decode(auth[1]).split(b':')[0] return request_key == api_key return False
[ "def", "_is_api_key_correct", "(", "request", ")", ":", "api_key", "=", "getattr", "(", "settings", ",", "'GECKOBOARD_API_KEY'", ",", "None", ")", "if", "api_key", "is", "None", ":", "return", "True", "auth", "=", "request", ".", "META", ".", "get", "(", ...
Return whether the Geckoboard API key on the request is correct.
[ "Return", "whether", "the", "Geckoboard", "API", "key", "on", "the", "request", "is", "correct", "." ]
train
https://github.com/jcassee/django-geckoboard/blob/6ebdaa86015fe645360abf1ba1290132de4cf6d6/django_geckoboard/decorators.py#L425-L435
jcassee/django-geckoboard
django_geckoboard/decorators.py
_encrypt
def _encrypt(data): """Equivalent to OpenSSL using 256 bit AES in CBC mode""" BS = AES.block_size def pad(s): n = BS - len(s) % BS char = chr(n).encode('utf8') return s + n * char password = settings.GECKOBOARD_PASSWORD salt = Random.new().read(BS - len('Salted__')) key, iv = _derive_key_and_iv(password, salt, 32, BS) cipher = AES.new(key, AES.MODE_CBC, iv) encrypted = b'Salted__' + salt + cipher.encrypt(pad(data)) return base64.b64encode(encrypted)
python
def _encrypt(data): """Equivalent to OpenSSL using 256 bit AES in CBC mode""" BS = AES.block_size def pad(s): n = BS - len(s) % BS char = chr(n).encode('utf8') return s + n * char password = settings.GECKOBOARD_PASSWORD salt = Random.new().read(BS - len('Salted__')) key, iv = _derive_key_and_iv(password, salt, 32, BS) cipher = AES.new(key, AES.MODE_CBC, iv) encrypted = b'Salted__' + salt + cipher.encrypt(pad(data)) return base64.b64encode(encrypted)
[ "def", "_encrypt", "(", "data", ")", ":", "BS", "=", "AES", ".", "block_size", "def", "pad", "(", "s", ")", ":", "n", "=", "BS", "-", "len", "(", "s", ")", "%", "BS", "char", "=", "chr", "(", "n", ")", ".", "encode", "(", "'utf8'", ")", "re...
Equivalent to OpenSSL using 256 bit AES in CBC mode
[ "Equivalent", "to", "OpenSSL", "using", "256", "bit", "AES", "in", "CBC", "mode" ]
train
https://github.com/jcassee/django-geckoboard/blob/6ebdaa86015fe645360abf1ba1290132de4cf6d6/django_geckoboard/decorators.py#L446-L460
jcassee/django-geckoboard
django_geckoboard/decorators.py
_render
def _render(request, data, encrypted, format=None): """ Render the data to Geckoboard. If the `format` parameter is passed to the widget it defines the output format. Otherwise the output format is based on the `format` request parameter. A `format` paramater of ``json`` or ``2`` renders JSON output, any other value renders XML. """ if not format: format = request.POST.get('format', '') if not format: format = request.GET.get('format', '') if format == 'json' or format == '2': return _render_json(data, encrypted) else: return _render_xml(data, encrypted)
python
def _render(request, data, encrypted, format=None): """ Render the data to Geckoboard. If the `format` parameter is passed to the widget it defines the output format. Otherwise the output format is based on the `format` request parameter. A `format` paramater of ``json`` or ``2`` renders JSON output, any other value renders XML. """ if not format: format = request.POST.get('format', '') if not format: format = request.GET.get('format', '') if format == 'json' or format == '2': return _render_json(data, encrypted) else: return _render_xml(data, encrypted)
[ "def", "_render", "(", "request", ",", "data", ",", "encrypted", ",", "format", "=", "None", ")", ":", "if", "not", "format", ":", "format", "=", "request", ".", "POST", ".", "get", "(", "'format'", ",", "''", ")", "if", "not", "format", ":", "form...
Render the data to Geckoboard. If the `format` parameter is passed to the widget it defines the output format. Otherwise the output format is based on the `format` request parameter. A `format` paramater of ``json`` or ``2`` renders JSON output, any other value renders XML.
[ "Render", "the", "data", "to", "Geckoboard", ".", "If", "the", "format", "parameter", "is", "passed", "to", "the", "widget", "it", "defines", "the", "output", "format", ".", "Otherwise", "the", "output", "format", "is", "based", "on", "the", "format", "req...
train
https://github.com/jcassee/django-geckoboard/blob/6ebdaa86015fe645360abf1ba1290132de4cf6d6/django_geckoboard/decorators.py#L463-L479
raags/ipmitool
ipmi/ipmicli.py
print_report
def print_report(runner_results): """ Print collated report with output and errors if any """ error_report = collections.defaultdict(list) output_report = collections.defaultdict(list) success_report = list() for runner_info in runner_results: hostname = runner_info['console'] error = runner_info['error'] output = runner_info['output'] if error: error_report[error].append(hostname) elif output: output_report[output].append(hostname) else: success_report.append(hostname) if error_report: print("Errors : ") for error in error_report: print("{0} -- [{1}] {2}".format(error.strip(), len(error_report[error]), ", ".join(error_report[error]))) print() if output_report: for output in output_report: print("{0} -- [{1}] {2}".format(output, len(output_report[output]), ", ".join(output_report[output]))) if success_report: print("Completed config on {0} hosts".format(len(success_report)))
python
def print_report(runner_results): """ Print collated report with output and errors if any """ error_report = collections.defaultdict(list) output_report = collections.defaultdict(list) success_report = list() for runner_info in runner_results: hostname = runner_info['console'] error = runner_info['error'] output = runner_info['output'] if error: error_report[error].append(hostname) elif output: output_report[output].append(hostname) else: success_report.append(hostname) if error_report: print("Errors : ") for error in error_report: print("{0} -- [{1}] {2}".format(error.strip(), len(error_report[error]), ", ".join(error_report[error]))) print() if output_report: for output in output_report: print("{0} -- [{1}] {2}".format(output, len(output_report[output]), ", ".join(output_report[output]))) if success_report: print("Completed config on {0} hosts".format(len(success_report)))
[ "def", "print_report", "(", "runner_results", ")", ":", "error_report", "=", "collections", ".", "defaultdict", "(", "list", ")", "output_report", "=", "collections", ".", "defaultdict", "(", "list", ")", "success_report", "=", "list", "(", ")", "for", "runner...
Print collated report with output and errors if any
[ "Print", "collated", "report", "with", "output", "and", "errors", "if", "any" ]
train
https://github.com/raags/ipmitool/blob/830081623c0ec75d560123a559f0bb201f26cde6/ipmi/ipmicli.py#L83-L114
raags/ipmitool
ipmi/ipmicli.py
Runner.ipmi_method
def ipmi_method(self, command): """Use ipmitool to run commands with ipmi protocol """ ipmi = ipmitool(self.console, self.password, self.username) if command == "reboot": self.ipmi_method(command="status") if self.output == "Chassis Power is off": command = "on" ipmi.execute(self.ipmi_map[command]) if ipmi.status: self.error = ipmi.error.strip() else: self.output = ipmi.output.strip() self.status = ipmi.status
python
def ipmi_method(self, command): """Use ipmitool to run commands with ipmi protocol """ ipmi = ipmitool(self.console, self.password, self.username) if command == "reboot": self.ipmi_method(command="status") if self.output == "Chassis Power is off": command = "on" ipmi.execute(self.ipmi_map[command]) if ipmi.status: self.error = ipmi.error.strip() else: self.output = ipmi.output.strip() self.status = ipmi.status
[ "def", "ipmi_method", "(", "self", ",", "command", ")", ":", "ipmi", "=", "ipmitool", "(", "self", ".", "console", ",", "self", ".", "password", ",", "self", ".", "username", ")", "if", "command", "==", "\"reboot\"", ":", "self", ".", "ipmi_method", "(...
Use ipmitool to run commands with ipmi protocol
[ "Use", "ipmitool", "to", "run", "commands", "with", "ipmi", "protocol" ]
train
https://github.com/raags/ipmitool/blob/830081623c0ec75d560123a559f0bb201f26cde6/ipmi/ipmicli.py#L47-L63
raags/ipmitool
ipmi/ipmicli.py
Runner.run
def run(self): """Start thread run here """ try: if self.command == "pxer": self.ipmi_method(command="pxe") if self.status == 0 or self.status == None: self.command = "reboot" else: return self.ipmi_method(self.command) except Exception as e: self.error = str(e)
python
def run(self): """Start thread run here """ try: if self.command == "pxer": self.ipmi_method(command="pxe") if self.status == 0 or self.status == None: self.command = "reboot" else: return self.ipmi_method(self.command) except Exception as e: self.error = str(e)
[ "def", "run", "(", "self", ")", ":", "try", ":", "if", "self", ".", "command", "==", "\"pxer\"", ":", "self", ".", "ipmi_method", "(", "command", "=", "\"pxe\"", ")", "if", "self", ".", "status", "==", "0", "or", "self", ".", "status", "==", "None"...
Start thread run here
[ "Start", "thread", "run", "here" ]
train
https://github.com/raags/ipmitool/blob/830081623c0ec75d560123a559f0bb201f26cde6/ipmi/ipmicli.py#L65-L79
shmir/PyIxNetwork
ixnetwork/ixn_traffic.py
IxnTrafficItem._create
def _create(self): """ Create new object on IxNetwork. :return: IXN object reference. """ if 'name' in self._data: obj_ref = self.api.add(self.obj_parent(), self.obj_type(), name=self.obj_name()) else: obj_ref = self.api.add(self.obj_parent(), self.obj_type()) self.api.commit() return self.api.remapIds(obj_ref)
python
def _create(self): """ Create new object on IxNetwork. :return: IXN object reference. """ if 'name' in self._data: obj_ref = self.api.add(self.obj_parent(), self.obj_type(), name=self.obj_name()) else: obj_ref = self.api.add(self.obj_parent(), self.obj_type()) self.api.commit() return self.api.remapIds(obj_ref)
[ "def", "_create", "(", "self", ")", ":", "if", "'name'", "in", "self", ".", "_data", ":", "obj_ref", "=", "self", ".", "api", ".", "add", "(", "self", ".", "obj_parent", "(", ")", ",", "self", ".", "obj_type", "(", ")", ",", "name", "=", "self", ...
Create new object on IxNetwork. :return: IXN object reference.
[ "Create", "new", "object", "on", "IxNetwork", "." ]
train
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_traffic.py#L36-L47
arcturial/clickatell-python
clickatell/__init__.py
Transport.merge
def merge(self, *args): """ Merge multiple dictionary objects into one. :param variadic args: Multiple dictionary items :return dict """ values = [] for entry in args: values = values + list(entry.items()) return dict(values)
python
def merge(self, *args): """ Merge multiple dictionary objects into one. :param variadic args: Multiple dictionary items :return dict """ values = [] for entry in args: values = values + list(entry.items()) return dict(values)
[ "def", "merge", "(", "self", ",", "*", "args", ")", ":", "values", "=", "[", "]", "for", "entry", "in", "args", ":", "values", "=", "values", "+", "list", "(", "entry", ".", "items", "(", ")", ")", "return", "dict", "(", "values", ")" ]
Merge multiple dictionary objects into one. :param variadic args: Multiple dictionary items :return dict
[ "Merge", "multiple", "dictionary", "objects", "into", "one", "." ]
train
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/__init__.py#L38-L51
arcturial/clickatell-python
clickatell/__init__.py
Transport.parseLegacy
def parseLegacy(self, response): """ Parse a legacy response and try and catch any errors. If we have multiple responses we wont catch any exceptions, we will return the errors row by row :param dict response: The response string returned from request() :return Returns a dictionary or a list (list for multiple responses) """ lines = response.splitlines() result = [] pattern = re.compile('([A-Za-z]+):((.(?![A-Za-z]+:))*)') for line in lines: matches = pattern.findall(line) row = {} for match in matches: row[match[0]] = match[1].strip() try: error = row['ERR'].split(',') except KeyError: pass else: row['code'] = error[0] if len(error) == 2 else 0 row['error'] = error[1].strip() if len(error) == 2 else error[0] del row['ERR'] # If this response is a single row response, then we will throw # an exception to alert the user of any failures. if (len(lines) == 1): raise ClickatellError(row['error'], row['code']) finally: result.append(row) return result if len(result) > 1 else result[0]
python
def parseLegacy(self, response): """ Parse a legacy response and try and catch any errors. If we have multiple responses we wont catch any exceptions, we will return the errors row by row :param dict response: The response string returned from request() :return Returns a dictionary or a list (list for multiple responses) """ lines = response.splitlines() result = [] pattern = re.compile('([A-Za-z]+):((.(?![A-Za-z]+:))*)') for line in lines: matches = pattern.findall(line) row = {} for match in matches: row[match[0]] = match[1].strip() try: error = row['ERR'].split(',') except KeyError: pass else: row['code'] = error[0] if len(error) == 2 else 0 row['error'] = error[1].strip() if len(error) == 2 else error[0] del row['ERR'] # If this response is a single row response, then we will throw # an exception to alert the user of any failures. if (len(lines) == 1): raise ClickatellError(row['error'], row['code']) finally: result.append(row) return result if len(result) > 1 else result[0]
[ "def", "parseLegacy", "(", "self", ",", "response", ")", ":", "lines", "=", "response", ".", "splitlines", "(", ")", "result", "=", "[", "]", "pattern", "=", "re", ".", "compile", "(", "'([A-Za-z]+):((.(?![A-Za-z]+:))*)'", ")", "for", "line", "in", "lines"...
Parse a legacy response and try and catch any errors. If we have multiple responses we wont catch any exceptions, we will return the errors row by row :param dict response: The response string returned from request() :return Returns a dictionary or a list (list for multiple responses)
[ "Parse", "a", "legacy", "response", "and", "try", "and", "catch", "any", "errors", ".", "If", "we", "have", "multiple", "responses", "we", "wont", "catch", "any", "exceptions", "we", "will", "return", "the", "errors", "row", "by", "row" ]
train
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/__init__.py#L53-L90
arcturial/clickatell-python
clickatell/__init__.py
Transport.parseRest
def parseRest(self, response): """ Parse a REST response. If the response contains an error field, we will raise it as an exception. """ body = json.loads(response) try: error = body['error']['description'] code = body['error']['code'] except Exception: return body['data'] else: raise ClickatellError(error, code);
python
def parseRest(self, response): """ Parse a REST response. If the response contains an error field, we will raise it as an exception. """ body = json.loads(response) try: error = body['error']['description'] code = body['error']['code'] except Exception: return body['data'] else: raise ClickatellError(error, code);
[ "def", "parseRest", "(", "self", ",", "response", ")", ":", "body", "=", "json", ".", "loads", "(", "response", ")", "try", ":", "error", "=", "body", "[", "'error'", "]", "[", "'description'", "]", "code", "=", "body", "[", "'error'", "]", "[", "'...
Parse a REST response. If the response contains an error field, we will raise it as an exception.
[ "Parse", "a", "REST", "response", ".", "If", "the", "response", "contains", "an", "error", "field", "we", "will", "raise", "it", "as", "an", "exception", "." ]
train
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/__init__.py#L92-L105
arcturial/clickatell-python
clickatell/__init__.py
Transport.request
def request(self, action, data={}, headers={}, method='GET'): """ Run the HTTP request against the Clickatell API :param str action: The API action :param dict data: The request parameters :param dict headers: The request headers (if any) :param str method: The HTTP method :return: The request response """ url = ('https' if self.secure else 'http') + '://' + self.endpoint url = url + '/' + action # Set the User-Agent userAgent = "".join(["ClickatellPython/0.1.2", " ", "Python/", platform.python_version()]) headers = self.merge({ "User-Agent": userAgent }, headers) try: func = getattr(requests, method.lower()) except AttributeError: raise Exception('HTTP method ' + method + ' unsupported.') resp = func(url, params=data, data=json.dumps(data), headers=headers) # Set the coding before unwrapping the text resp.encoding = 'utf-8' content = resp.text return content
python
def request(self, action, data={}, headers={}, method='GET'): """ Run the HTTP request against the Clickatell API :param str action: The API action :param dict data: The request parameters :param dict headers: The request headers (if any) :param str method: The HTTP method :return: The request response """ url = ('https' if self.secure else 'http') + '://' + self.endpoint url = url + '/' + action # Set the User-Agent userAgent = "".join(["ClickatellPython/0.1.2", " ", "Python/", platform.python_version()]) headers = self.merge({ "User-Agent": userAgent }, headers) try: func = getattr(requests, method.lower()) except AttributeError: raise Exception('HTTP method ' + method + ' unsupported.') resp = func(url, params=data, data=json.dumps(data), headers=headers) # Set the coding before unwrapping the text resp.encoding = 'utf-8' content = resp.text return content
[ "def", "request", "(", "self", ",", "action", ",", "data", "=", "{", "}", ",", "headers", "=", "{", "}", ",", "method", "=", "'GET'", ")", ":", "url", "=", "(", "'https'", "if", "self", ".", "secure", "else", "'http'", ")", "+", "'://'", "+", "...
Run the HTTP request against the Clickatell API :param str action: The API action :param dict data: The request parameters :param dict headers: The request headers (if any) :param str method: The HTTP method :return: The request response
[ "Run", "the", "HTTP", "request", "against", "the", "Clickatell", "API" ]
train
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/__init__.py#L107-L135
theonion/django-bulbs
bulbs/promotion/views.py
OperationsViewSet.serialize_operations
def serialize_operations(self, operations): """Serialize a list of operations into JSON.""" serialized_ops = [] for operation in operations: serializer = self.get_serializer_class(operation.__class__) serialized_ops.append(serializer(operation).data) return serialized_ops
python
def serialize_operations(self, operations): """Serialize a list of operations into JSON.""" serialized_ops = [] for operation in operations: serializer = self.get_serializer_class(operation.__class__) serialized_ops.append(serializer(operation).data) return serialized_ops
[ "def", "serialize_operations", "(", "self", ",", "operations", ")", ":", "serialized_ops", "=", "[", "]", "for", "operation", "in", "operations", ":", "serializer", "=", "self", ".", "get_serializer_class", "(", "operation", ".", "__class__", ")", "serialized_op...
Serialize a list of operations into JSON.
[ "Serialize", "a", "list", "of", "operations", "into", "JSON", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L50-L57
theonion/django-bulbs
bulbs/promotion/views.py
OperationsViewSet.get
def get(self, request, pzone_pk): """Get all the operations for a given pzone.""" # attempt to get given pzone try: pzone = PZone.objects.get(pk=pzone_pk) except PZone.DoesNotExist: raise Http404("Cannot find given pzone.") # bulid filters filters = {"pzone": pzone} if "from" in request.GET: parsed = dateparse.parse_datetime(request.GET["from"]) if parsed is not None: filters["when__gte"] = parsed if "to" in request.GET: parsed = dateparse.parse_datetime(request.GET["to"]) if parsed is not None: filters["when__lt"] = parsed # get operations and serialize them operations = PZoneOperation.objects.filter(**filters) # return a json response with serialized operations return Response(self.serialize_operations(operations), content_type="application/json")
python
def get(self, request, pzone_pk): """Get all the operations for a given pzone.""" # attempt to get given pzone try: pzone = PZone.objects.get(pk=pzone_pk) except PZone.DoesNotExist: raise Http404("Cannot find given pzone.") # bulid filters filters = {"pzone": pzone} if "from" in request.GET: parsed = dateparse.parse_datetime(request.GET["from"]) if parsed is not None: filters["when__gte"] = parsed if "to" in request.GET: parsed = dateparse.parse_datetime(request.GET["to"]) if parsed is not None: filters["when__lt"] = parsed # get operations and serialize them operations = PZoneOperation.objects.filter(**filters) # return a json response with serialized operations return Response(self.serialize_operations(operations), content_type="application/json")
[ "def", "get", "(", "self", ",", "request", ",", "pzone_pk", ")", ":", "# attempt to get given pzone", "try", ":", "pzone", "=", "PZone", ".", "objects", ".", "get", "(", "pk", "=", "pzone_pk", ")", "except", "PZone", ".", "DoesNotExist", ":", "raise", "H...
Get all the operations for a given pzone.
[ "Get", "all", "the", "operations", "for", "a", "given", "pzone", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L59-L85
theonion/django-bulbs
bulbs/promotion/views.py
OperationsViewSet.post
def post(self, request, pzone_pk): """Add a new operation to the given pzone, return json of the new operation.""" # attempt to get given content list pzone = None try: pzone = PZone.objects.get(pk=pzone_pk) except PZone.DoesNotExist: raise Http404("Cannot find given pzone.") json_obj = [] http_status = 500 json_op = json.loads(request.body.decode("utf8")) if not isinstance(json_op, list): json_op = [json_op] for data in json_op: try: serializer = self.get_serializer_class_by_name(data["type_name"]) except ContentType.DoesNotExist as e: json_obj = {"errors": [str(e)]} http_status = 400 break serialized = serializer(data=data) if serialized.is_valid(): # object is valid, save it serialized.save() # set response data json_obj.append(serialized.data) http_status = 200 else: # object is not valid, return errors in a 400 response json_obj = serialized.errors http_status = 400 break if http_status == 200 and len(json_obj) == 1: json_obj = json_obj[0] # cache the time in seconds until the next operation occurs next_ops = PZoneOperation.objects.filter(when__lte=timezone.now()) if len(next_ops) > 0: # we have at least one operation, ordered soonest first next_op = next_ops[0] # cache with expiry number of seconds until op should exec cache.set('pzone-operation-expiry-' + pzone.name, next_op.when, 60 * 60 * 5) return Response( json_obj, status=http_status, content_type="application/json" )
python
def post(self, request, pzone_pk): """Add a new operation to the given pzone, return json of the new operation.""" # attempt to get given content list pzone = None try: pzone = PZone.objects.get(pk=pzone_pk) except PZone.DoesNotExist: raise Http404("Cannot find given pzone.") json_obj = [] http_status = 500 json_op = json.loads(request.body.decode("utf8")) if not isinstance(json_op, list): json_op = [json_op] for data in json_op: try: serializer = self.get_serializer_class_by_name(data["type_name"]) except ContentType.DoesNotExist as e: json_obj = {"errors": [str(e)]} http_status = 400 break serialized = serializer(data=data) if serialized.is_valid(): # object is valid, save it serialized.save() # set response data json_obj.append(serialized.data) http_status = 200 else: # object is not valid, return errors in a 400 response json_obj = serialized.errors http_status = 400 break if http_status == 200 and len(json_obj) == 1: json_obj = json_obj[0] # cache the time in seconds until the next operation occurs next_ops = PZoneOperation.objects.filter(when__lte=timezone.now()) if len(next_ops) > 0: # we have at least one operation, ordered soonest first next_op = next_ops[0] # cache with expiry number of seconds until op should exec cache.set('pzone-operation-expiry-' + pzone.name, next_op.when, 60 * 60 * 5) return Response( json_obj, status=http_status, content_type="application/json" )
[ "def", "post", "(", "self", ",", "request", ",", "pzone_pk", ")", ":", "# attempt to get given content list", "pzone", "=", "None", "try", ":", "pzone", "=", "PZone", ".", "objects", ".", "get", "(", "pk", "=", "pzone_pk", ")", "except", "PZone", ".", "D...
Add a new operation to the given pzone, return json of the new operation.
[ "Add", "a", "new", "operation", "to", "the", "given", "pzone", "return", "json", "of", "the", "new", "operation", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L87-L141
theonion/django-bulbs
bulbs/promotion/views.py
OperationsViewSet.delete
def delete(self, request, pzone_pk, operation_pk): """Remove an operation from the given pzone.""" # note : we're not using the pzone_pk here since it's not actually # necessary for getting an operation by pk, but it sure makes the urls # nicer! # attempt to delete operation try: operation = PZoneOperation.objects.get(pk=operation_pk) except PZoneOperation.DoesNotExist: raise Http404("Cannot find given operation.") # delete operation operation.delete() # successful delete, return 204 return Response("", 204)
python
def delete(self, request, pzone_pk, operation_pk): """Remove an operation from the given pzone.""" # note : we're not using the pzone_pk here since it's not actually # necessary for getting an operation by pk, but it sure makes the urls # nicer! # attempt to delete operation try: operation = PZoneOperation.objects.get(pk=operation_pk) except PZoneOperation.DoesNotExist: raise Http404("Cannot find given operation.") # delete operation operation.delete() # successful delete, return 204 return Response("", 204)
[ "def", "delete", "(", "self", ",", "request", ",", "pzone_pk", ",", "operation_pk", ")", ":", "# note : we're not using the pzone_pk here since it's not actually", "# necessary for getting an operation by pk, but it sure makes the urls", "# nicer!", "# attempt to delete operation",...
Remove an operation from the given pzone.
[ "Remove", "an", "operation", "from", "the", "given", "pzone", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L143-L160
theonion/django-bulbs
bulbs/promotion/views.py
PZoneViewSet.perform_update
def perform_update(self, serializer): """creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated) """ instance = serializer.save() # create history object instance.history.create(data=instance.data)
python
def perform_update(self, serializer): """creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated) """ instance = serializer.save() # create history object instance.history.create(data=instance.data)
[ "def", "perform_update", "(", "self", ",", "serializer", ")", ":", "instance", "=", "serializer", ".", "save", "(", ")", "# create history object", "instance", ".", "history", ".", "create", "(", "data", "=", "instance", ".", "data", ")" ]
creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated)
[ "creates", "a", "record", "in", "the", "bulbs", ".", "promotion", ".", "PZoneHistory" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L171-L179
theonion/django-bulbs
bulbs/promotion/views.py
PZoneViewSet.retrieve
def retrieve(self, request, *args, **kwargs): """Retrieve pzone as a preview or applied if no preview is provided.""" when_param = get_query_params(self.request).get("preview", None) pk = self.kwargs["pk"] when = None if when_param: try: when = parse_date(when_param) except ValueError: # invalid format, set back to None when = None pzone = None if when: # we have a date, use it pzone = PZone.objects.preview(pk=pk, when=when) else: # we have no date, just get the pzone pzone = PZone.objects.applied(pk=pk) # turn content list into json return Response(PZoneSerializer(pzone).data, content_type="application/json")
python
def retrieve(self, request, *args, **kwargs): """Retrieve pzone as a preview or applied if no preview is provided.""" when_param = get_query_params(self.request).get("preview", None) pk = self.kwargs["pk"] when = None if when_param: try: when = parse_date(when_param) except ValueError: # invalid format, set back to None when = None pzone = None if when: # we have a date, use it pzone = PZone.objects.preview(pk=pk, when=when) else: # we have no date, just get the pzone pzone = PZone.objects.applied(pk=pk) # turn content list into json return Response(PZoneSerializer(pzone).data, content_type="application/json")
[ "def", "retrieve", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "when_param", "=", "get_query_params", "(", "self", ".", "request", ")", ".", "get", "(", "\"preview\"", ",", "None", ")", "pk", "=", "self", ".", ...
Retrieve pzone as a preview or applied if no preview is provided.
[ "Retrieve", "pzone", "as", "a", "preview", "or", "applied", "if", "no", "preview", "is", "provided", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/promotion/views.py#L181-L204
bioasp/caspo
caspo/design.py
Designer.design
def design(self, max_stimuli=-1, max_inhibitors=-1, max_experiments=10, relax=False, configure=None): """ Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`. Example:: >>> from caspo import core, design >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> designer = design.Designer(networks, setup) >>> designer.design(3, 2) >>> for i,d in enumerate(designer.designs): ... f = 'design-%s' % i ... d.to_csv(f, stimuli=self.setup.stimuli, inhibitors=self.setup.inhibitors) Parameters ---------- max_stimuli : int Maximum number of stimuli per experiment max_inhibitors : int Maximum number of inhibitors per experiment max_experiments : int Maximum number of experiments per design relax : boolean Whether to relax the full-pairwise networks discrimination (True) or not (False). 
If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments` configure : callable Callable object responsible of setting clingo configuration """ self.designs = [] args = ['-c maxstimuli=%s' % max_stimuli, '-c maxinhibitors=%s' % max_inhibitors, '-Wno-atom-undefined'] clingo = gringo.Control(args) clingo.conf.solve.opt_mode = 'optN' if configure is not None: configure(clingo.conf) clingo.add("base", [], self.instance) clingo.load(self.encodings['design']) clingo.ground([("base", [])]) if relax: parts = [("step", [step]) for step in xrange(1, max_experiments+1)] parts.append(("diff", [max_experiments + 1])) clingo.ground(parts) ret = clingo.solve(on_model=self.__save__) else: step, ret = 0, gringo.SolveResult.UNKNOWN while step <= max_experiments and ret != gringo.SolveResult.SAT: parts = [] parts.append(("check", [step])) if step > 0: clingo.release_external(gringo.Fun("query", [step-1])) parts.append(("step", [step])) clingo.cleanup_domains() clingo.ground(parts) clingo.assign_external(gringo.Fun("query", [step]), True) ret, step = clingo.solve(on_model=self.__save__), step + 1 self.stats['time_optimum'] = clingo.stats['time_solve'] self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s optimal experimental designs found in %.4fs", len(self.designs), self.stats['time_enumeration'])
python
def design(self, max_stimuli=-1, max_inhibitors=-1, max_experiments=10, relax=False, configure=None): """ Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`. Example:: >>> from caspo import core, design >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> designer = design.Designer(networks, setup) >>> designer.design(3, 2) >>> for i,d in enumerate(designer.designs): ... f = 'design-%s' % i ... d.to_csv(f, stimuli=self.setup.stimuli, inhibitors=self.setup.inhibitors) Parameters ---------- max_stimuli : int Maximum number of stimuli per experiment max_inhibitors : int Maximum number of inhibitors per experiment max_experiments : int Maximum number of experiments per design relax : boolean Whether to relax the full-pairwise networks discrimination (True) or not (False). 
If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments` configure : callable Callable object responsible of setting clingo configuration """ self.designs = [] args = ['-c maxstimuli=%s' % max_stimuli, '-c maxinhibitors=%s' % max_inhibitors, '-Wno-atom-undefined'] clingo = gringo.Control(args) clingo.conf.solve.opt_mode = 'optN' if configure is not None: configure(clingo.conf) clingo.add("base", [], self.instance) clingo.load(self.encodings['design']) clingo.ground([("base", [])]) if relax: parts = [("step", [step]) for step in xrange(1, max_experiments+1)] parts.append(("diff", [max_experiments + 1])) clingo.ground(parts) ret = clingo.solve(on_model=self.__save__) else: step, ret = 0, gringo.SolveResult.UNKNOWN while step <= max_experiments and ret != gringo.SolveResult.SAT: parts = [] parts.append(("check", [step])) if step > 0: clingo.release_external(gringo.Fun("query", [step-1])) parts.append(("step", [step])) clingo.cleanup_domains() clingo.ground(parts) clingo.assign_external(gringo.Fun("query", [step]), True) ret, step = clingo.solve(on_model=self.__save__), step + 1 self.stats['time_optimum'] = clingo.stats['time_solve'] self.stats['time_enumeration'] = clingo.stats['time_total'] self._logger.info("%s optimal experimental designs found in %.4fs", len(self.designs), self.stats['time_enumeration'])
[ "def", "design", "(", "self", ",", "max_stimuli", "=", "-", "1", ",", "max_inhibitors", "=", "-", "1", ",", "max_experiments", "=", "10", ",", "relax", "=", "False", ",", "configure", "=", "None", ")", ":", "self", ".", "designs", "=", "[", "]", "a...
Finds all optimal experimental designs using up to :attr:`max_experiments` experiments, such that each experiment has up to :attr:`max_stimuli` stimuli and :attr:`max_inhibitors` inhibitors. Each optimal experimental design is appended in the attribute :attr:`designs` as an instance of :class:`caspo.core.clamping.ClampingList`. Example:: >>> from caspo import core, design >>> networks = core.LogicalNetworkList.from_csv('behaviors.csv') >>> setup = core.Setup.from_json('setup.json') >>> designer = design.Designer(networks, setup) >>> designer.design(3, 2) >>> for i,d in enumerate(designer.designs): ... f = 'design-%s' % i ... d.to_csv(f, stimuli=self.setup.stimuli, inhibitors=self.setup.inhibitors) Parameters ---------- max_stimuli : int Maximum number of stimuli per experiment max_inhibitors : int Maximum number of inhibitors per experiment max_experiments : int Maximum number of experiments per design relax : boolean Whether to relax the full-pairwise networks discrimination (True) or not (False). If relax equals True, the number of experiments per design is fixed to :attr:`max_experiments` configure : callable Callable object responsible of setting clingo configuration
[ "Finds", "all", "optimal", "experimental", "designs", "using", "up", "to", ":", "attr", ":", "max_experiments", "experiments", "such", "that", "each", "experiment", "has", "up", "to", ":", "attr", ":", "max_stimuli", "stimuli", "and", ":", "attr", ":", "max_...
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/design.py#L92-L167
saghul/evergreen
evergreen/core/loop.py
EventLoop.current
def current(cls): """Get the current event loop singleton object. """ try: return _tls.loop except AttributeError: # create loop only for main thread if threading.current_thread().name == 'MainThread': _tls.loop = cls() return _tls.loop raise RuntimeError('there is no event loop created in the current thread')
python
def current(cls): """Get the current event loop singleton object. """ try: return _tls.loop except AttributeError: # create loop only for main thread if threading.current_thread().name == 'MainThread': _tls.loop = cls() return _tls.loop raise RuntimeError('there is no event loop created in the current thread')
[ "def", "current", "(", "cls", ")", ":", "try", ":", "return", "_tls", ".", "loop", "except", "AttributeError", ":", "# create loop only for main thread", "if", "threading", ".", "current_thread", "(", ")", ".", "name", "==", "'MainThread'", ":", "_tls", ".", ...
Get the current event loop singleton object.
[ "Get", "the", "current", "event", "loop", "singleton", "object", "." ]
train
https://github.com/saghul/evergreen/blob/22f22f45892f397c23c3e09e6ea1ad4c00b3add8/evergreen/core/loop.py#L109-L119
PGower/PyCanvas
pycanvas/apis/account_domain_lookups.py
AccountDomainLookupsAPI.search_account_domains
def search_account_domains(self, domain=None, latitude=None, longitude=None, name=None): """ Search account domains. Returns a list of up to 5 matching account domains Partial match on name / domain are supported """ path = {} data = {} params = {} # OPTIONAL - name """campus name""" if name is not None: params["name"] = name # OPTIONAL - domain """no description""" if domain is not None: params["domain"] = domain # OPTIONAL - latitude """no description""" if latitude is not None: params["latitude"] = latitude # OPTIONAL - longitude """no description""" if longitude is not None: params["longitude"] = longitude self.logger.debug("GET /api/v1/accounts/search with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/search".format(**path), data=data, params=params, no_data=True)
python
def search_account_domains(self, domain=None, latitude=None, longitude=None, name=None): """ Search account domains. Returns a list of up to 5 matching account domains Partial match on name / domain are supported """ path = {} data = {} params = {} # OPTIONAL - name """campus name""" if name is not None: params["name"] = name # OPTIONAL - domain """no description""" if domain is not None: params["domain"] = domain # OPTIONAL - latitude """no description""" if latitude is not None: params["latitude"] = latitude # OPTIONAL - longitude """no description""" if longitude is not None: params["longitude"] = longitude self.logger.debug("GET /api/v1/accounts/search with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/search".format(**path), data=data, params=params, no_data=True)
[ "def", "search_account_domains", "(", "self", ",", "domain", "=", "None", ",", "latitude", "=", "None", ",", "longitude", "=", "None", ",", "name", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# OPT...
Search account domains. Returns a list of up to 5 matching account domains Partial match on name / domain are supported
[ "Search", "account", "domains", ".", "Returns", "a", "list", "of", "up", "to", "5", "matching", "account", "domains", "Partial", "match", "on", "name", "/", "domain", "are", "supported" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/account_domain_lookups.py#L19-L52
bharadwaj-raju/libdesktop
libdesktop/volume.py
set_volume
def set_volume(percentage): '''Set the volume. Sets the volume to a given percentage (integer between 0 and 100). Args: percentage (int): The percentage (as a 0 to 100 integer) to set the volume to. Raises: ValueError: if the percentage is >100 or <0. ''' if percentage > 100 or percentage < 0: raise ValueError('percentage must be an integer between 0 and 100') if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': # OS X uses 0-10 instead of percentage volume_int = percentage / 10 sp.Popen(['osascript', '-e', 'set Volume %d' % volume_int]).wait() else: # Linux/Unix formatted = str(percentage) + '%' sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
python
def set_volume(percentage): '''Set the volume. Sets the volume to a given percentage (integer between 0 and 100). Args: percentage (int): The percentage (as a 0 to 100 integer) to set the volume to. Raises: ValueError: if the percentage is >100 or <0. ''' if percentage > 100 or percentage < 0: raise ValueError('percentage must be an integer between 0 and 100') if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': # OS X uses 0-10 instead of percentage volume_int = percentage / 10 sp.Popen(['osascript', '-e', 'set Volume %d' % volume_int]).wait() else: # Linux/Unix formatted = str(percentage) + '%' sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
[ "def", "set_volume", "(", "percentage", ")", ":", "if", "percentage", ">", "100", "or", "percentage", "<", "0", ":", "raise", "ValueError", "(", "'percentage must be an integer between 0 and 100'", ")", "if", "system", ".", "get_name", "(", ")", "==", "'windows'...
Set the volume. Sets the volume to a given percentage (integer between 0 and 100). Args: percentage (int): The percentage (as a 0 to 100 integer) to set the volume to. Raises: ValueError: if the percentage is >100 or <0.
[ "Set", "the", "volume", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L32-L61
bharadwaj-raju/libdesktop
libdesktop/volume.py
get_volume
def get_volume(): '''Get the volume. Get the current volume. Returns: int: The current volume (percentage, between 0 and 100). ''' if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': volume = system.get_cmd_out( ['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol']) return int(volume) * 10 else: # Linux/Unix volume = system.get_cmd_out( ('amixer get Master |grep % |awk \'{print $5}\'|' 'sed -e \'s/\[//\' -e \'s/\]//\' | head -n1')) return int(volume.replace('%', ''))
python
def get_volume(): '''Get the volume. Get the current volume. Returns: int: The current volume (percentage, between 0 and 100). ''' if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': volume = system.get_cmd_out( ['osascript', '-e', 'set ovol to output volume of (get volume settings); return the quoted form of ovol']) return int(volume) * 10 else: # Linux/Unix volume = system.get_cmd_out( ('amixer get Master |grep % |awk \'{print $5}\'|' 'sed -e \'s/\[//\' -e \'s/\]//\' | head -n1')) return int(volume.replace('%', ''))
[ "def", "get_volume", "(", ")", ":", "if", "system", ".", "get_name", "(", ")", "==", "'windows'", ":", "# TODO: Implement volume for Windows. Looks like WinAPI is the", "# solution...", "pass", "elif", "system", ".", "get_name", "(", ")", "==", "'mac'", ":", "volu...
Get the volume. Get the current volume. Returns: int: The current volume (percentage, between 0 and 100).
[ "Get", "the", "volume", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L64-L88
bharadwaj-raju/libdesktop
libdesktop/volume.py
increase_volume
def increase_volume(percentage): '''Increase the volume. Increase the volume by a given percentage. Args: percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by. Raises: ValueError: if the percentage is >100 or <0. ''' if percentage > 100 or percentage < 0: raise ValueError('percentage must be an integer between 0 and 100') if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': volume_int = percentage / 10 old_volume = get() new_volume = old_volume + volume_int if new_volume > 10: new_volume = 10 set_volume(new_volume * 10) else: # Linux/Unix formatted = '%d%%+' % percentage # + or - increases/decreases in amixer sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
python
def increase_volume(percentage): '''Increase the volume. Increase the volume by a given percentage. Args: percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by. Raises: ValueError: if the percentage is >100 or <0. ''' if percentage > 100 or percentage < 0: raise ValueError('percentage must be an integer between 0 and 100') if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': volume_int = percentage / 10 old_volume = get() new_volume = old_volume + volume_int if new_volume > 10: new_volume = 10 set_volume(new_volume * 10) else: # Linux/Unix formatted = '%d%%+' % percentage # + or - increases/decreases in amixer sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
[ "def", "increase_volume", "(", "percentage", ")", ":", "if", "percentage", ">", "100", "or", "percentage", "<", "0", ":", "raise", "ValueError", "(", "'percentage must be an integer between 0 and 100'", ")", "if", "system", ".", "get_name", "(", ")", "==", "'win...
Increase the volume. Increase the volume by a given percentage. Args: percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by. Raises: ValueError: if the percentage is >100 or <0.
[ "Increase", "the", "volume", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L91-L127
bharadwaj-raju/libdesktop
libdesktop/volume.py
mute
def mute(): '''Mute the volume. Mutes the volume. ''' # NOTE: mute != 0 volume if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': sp.Popen(['osascript', '-e', 'set volume output muted true']).wait() else: # Linux/Unix if unix_is_pulseaudio_server(): sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset', 'Master', 'mute']).wait() # sset is *not* a typo else: sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'mute']).wait()
python
def mute(): '''Mute the volume. Mutes the volume. ''' # NOTE: mute != 0 volume if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': sp.Popen(['osascript', '-e', 'set volume output muted true']).wait() else: # Linux/Unix if unix_is_pulseaudio_server(): sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset', 'Master', 'mute']).wait() # sset is *not* a typo else: sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'mute']).wait()
[ "def", "mute", "(", ")", ":", "# NOTE: mute != 0 volume", "if", "system", ".", "get_name", "(", ")", "==", "'windows'", ":", "# TODO: Implement volume for Windows. Looks like WinAPI is the", "# solution...", "pass", "elif", "system", ".", "get_name", "(", ")", "==", ...
Mute the volume. Mutes the volume.
[ "Mute", "the", "volume", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L181-L204
bharadwaj-raju/libdesktop
libdesktop/volume.py
unmute
def unmute(): '''Unmute the volume. Unmutes the system volume. Note: On some systems, volume is restored to its previous level after unmute, or set to 100. ''' if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': sp.Popen(['osascript', '-e', 'set volume output muted false']).wait() else: # Linux/Unix if unix_is_pulseaudio_server(): sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset', 'Master', 'unmute']).wait() # sset is *not* a typo else: sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'unmute']).wait()
python
def unmute(): '''Unmute the volume. Unmutes the system volume. Note: On some systems, volume is restored to its previous level after unmute, or set to 100. ''' if system.get_name() == 'windows': # TODO: Implement volume for Windows. Looks like WinAPI is the # solution... pass elif system.get_name() == 'mac': sp.Popen(['osascript', '-e', 'set volume output muted false']).wait() else: # Linux/Unix if unix_is_pulseaudio_server(): sp.Popen(['amixer', '--quiet', '-D', 'pulse', 'sset', 'Master', 'unmute']).wait() # sset is *not* a typo else: sp.Popen(['amixer', '--quiet', 'sset', 'Master', 'unmute']).wait()
[ "def", "unmute", "(", ")", ":", "if", "system", ".", "get_name", "(", ")", "==", "'windows'", ":", "# TODO: Implement volume for Windows. Looks like WinAPI is the", "# solution...", "pass", "elif", "system", ".", "get_name", "(", ")", "==", "'mac'", ":", "sp", "...
Unmute the volume. Unmutes the system volume. Note: On some systems, volume is restored to its previous level after unmute, or set to 100.
[ "Unmute", "the", "volume", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/volume.py#L207-L231
anomaly/prestans
prestans/rest/request_handler.py
RequestHandler._setup_serializers
def _setup_serializers(self): """ Auto set the return serializer based on Accept headers http://docs.webob.org/en/latest/reference.html#header-getters Intersection of requested types and supported types tells us if we can in fact respond in one of the request formats """ acceptable_offers = self.request.accept.acceptable_offers(self.response.supported_mime_types) if len(acceptable_offers) > 0: best_accept_match = acceptable_offers[0][0] else: best_accept_match = self.response.default_serializer.content_type() # best_accept_match = self.request.accept.best_match( # self.response.supported_mime_types, # default_match=self.response.default_serializer.content_type() # ) self.logger.info("%s determined as best match for accept header: %s" % ( best_accept_match, self.request.accept )) # if content_type is not acceptable it will raise UnsupportedVocabulary self.response.content_type = best_accept_match
python
def _setup_serializers(self): """ Auto set the return serializer based on Accept headers http://docs.webob.org/en/latest/reference.html#header-getters Intersection of requested types and supported types tells us if we can in fact respond in one of the request formats """ acceptable_offers = self.request.accept.acceptable_offers(self.response.supported_mime_types) if len(acceptable_offers) > 0: best_accept_match = acceptable_offers[0][0] else: best_accept_match = self.response.default_serializer.content_type() # best_accept_match = self.request.accept.best_match( # self.response.supported_mime_types, # default_match=self.response.default_serializer.content_type() # ) self.logger.info("%s determined as best match for accept header: %s" % ( best_accept_match, self.request.accept )) # if content_type is not acceptable it will raise UnsupportedVocabulary self.response.content_type = best_accept_match
[ "def", "_setup_serializers", "(", "self", ")", ":", "acceptable_offers", "=", "self", ".", "request", ".", "accept", ".", "acceptable_offers", "(", "self", ".", "response", ".", "supported_mime_types", ")", "if", "len", "(", "acceptable_offers", ")", ">", "0",...
Auto set the return serializer based on Accept headers http://docs.webob.org/en/latest/reference.html#header-getters Intersection of requested types and supported types tells us if we can in fact respond in one of the request formats
[ "Auto", "set", "the", "return", "serializer", "based", "on", "Accept", "headers", "http", ":", "//", "docs", ".", "webob", ".", "org", "/", "en", "/", "latest", "/", "reference", ".", "html#header", "-", "getters" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/rest/request_handler.py#L113-L138
theonion/django-bulbs
bulbs/content/south_migrations/0003_migrate_feature_types.py
Migration.forwards
def forwards(self, orm): "Write your forwards methods here." rows = db.execute("select distinct feature_type from content_content") for row in rows: feature_type = row[0] try: ft = orm.FeatureType.objects.get(slug=slugify(feature_type)) except orm.FeatureType.DoesNotExist: ft = orm.FeatureType.objects.create( name=feature_type, slug=slugify(feature_type) ) db.execute("update content_content set feature_type_id = %s where feature_type = %s", [ft.id, feature_type])
python
def forwards(self, orm): "Write your forwards methods here." rows = db.execute("select distinct feature_type from content_content") for row in rows: feature_type = row[0] try: ft = orm.FeatureType.objects.get(slug=slugify(feature_type)) except orm.FeatureType.DoesNotExist: ft = orm.FeatureType.objects.create( name=feature_type, slug=slugify(feature_type) ) db.execute("update content_content set feature_type_id = %s where feature_type = %s", [ft.id, feature_type])
[ "def", "forwards", "(", "self", ",", "orm", ")", ":", "rows", "=", "db", ".", "execute", "(", "\"select distinct feature_type from content_content\"", ")", "for", "row", "in", "rows", ":", "feature_type", "=", "row", "[", "0", "]", "try", ":", "ft", "=", ...
Write your forwards methods here.
[ "Write", "your", "forwards", "methods", "here", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/content/south_migrations/0003_migrate_feature_types.py#L28-L41
PGower/PyCanvas
pycanvas/apis/authentications_log.py
AuthenticationsLogAPI.query_by_login
def query_by_login(self, login_id, end_time=None, start_time=None): """ Query by login. List authentication events for a given login. """ path = {} data = {} params = {} # REQUIRED - PATH - login_id """ID""" path["login_id"] = login_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/logins/{login_id}".format(**path), data=data, params=params, no_data=True)
python
def query_by_login(self, login_id, end_time=None, start_time=None): """ Query by login. List authentication events for a given login. """ path = {} data = {} params = {} # REQUIRED - PATH - login_id """ID""" path["login_id"] = login_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/logins/{login_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/logins/{login_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "query_by_login", "(", "self", ",", "login_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - login_id\r", "\"\"\"ID\"\"\"", "path"...
Query by login. List authentication events for a given login.
[ "Query", "by", "login", ".", "List", "authentication", "events", "for", "a", "given", "login", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/authentications_log.py#L19-L44
PGower/PyCanvas
pycanvas/apis/authentications_log.py
AuthenticationsLogAPI.query_by_account
def query_by_account(self, account_id, end_time=None, start_time=None): """ Query by account. List authentication events for a given account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True)
python
def query_by_account(self, account_id, end_time=None, start_time=None): """ Query by account. List authentication events for a given account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "query_by_account", "(", "self", ",", "account_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", ...
Query by account. List authentication events for a given account.
[ "Query", "by", "account", ".", "List", "authentication", "events", "for", "a", "given", "account", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/authentications_log.py#L46-L71
PGower/PyCanvas
pycanvas/apis/authentications_log.py
AuthenticationsLogAPI.query_by_user
def query_by_user(self, user_id, end_time=None, start_time=None): """ Query by user. List authentication events for a given user. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/users/{user_id}".format(**path), data=data, params=params, no_data=True)
python
def query_by_user(self, user_id, end_time=None, start_time=None): """ Query by user. List authentication events for a given user. """ path = {} data = {} params = {} # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id # OPTIONAL - start_time """The beginning of the time range from which you want events.""" if start_time is not None: params["start_time"] = start_time # OPTIONAL - end_time """The end of the time range from which you want events.""" if end_time is not None: params["end_time"] = end_time self.logger.debug("GET /api/v1/audit/authentication/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/audit/authentication/users/{user_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "query_by_user", "(", "self", ",", "user_id", ",", "end_time", "=", "None", ",", "start_time", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", ...
Query by user. List authentication events for a given user.
[ "Query", "by", "user", ".", "List", "authentication", "events", "for", "a", "given", "user", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/authentications_log.py#L73-L98
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
GroupDict._items
def _items(self, type_filter=None, name=None): """ Args: type_filter(list): Optional iterable of types to return (GroupDict only) name(str): Only return key by this name Alternative generator for items() method """ if name: if type_filter and self._key_attr == 'type': if name in type_filter and name in self: yield name, self[name] elif name in self: yield name, self[name] elif type_filter and self._key_attr == 'type': for key, val in self.items(): if key in type_filter: yield key, val else: for key, val in self.items(): yield key, val
python
def _items(self, type_filter=None, name=None): """ Args: type_filter(list): Optional iterable of types to return (GroupDict only) name(str): Only return key by this name Alternative generator for items() method """ if name: if type_filter and self._key_attr == 'type': if name in type_filter and name in self: yield name, self[name] elif name in self: yield name, self[name] elif type_filter and self._key_attr == 'type': for key, val in self.items(): if key in type_filter: yield key, val else: for key, val in self.items(): yield key, val
[ "def", "_items", "(", "self", ",", "type_filter", "=", "None", ",", "name", "=", "None", ")", ":", "if", "name", ":", "if", "type_filter", "and", "self", ".", "_key_attr", "==", "'type'", ":", "if", "name", "in", "type_filter", "and", "name", "in", "...
Args: type_filter(list): Optional iterable of types to return (GroupDict only) name(str): Only return key by this name Alternative generator for items() method
[ "Args", ":", "type_filter", "(", "list", ")", ":", "Optional", "iterable", "of", "types", "to", "return", "(", "GroupDict", "only", ")", "name", "(", "str", ")", ":", "Only", "return", "key", "by", "this", "name" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L95-L117
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
GroupDict._filter
def _filter(self, blacklist=None, newest_only=False, type_filter=None, **kwargs): """ Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned type(str): Plugin type to retrieve name(str): Plugin name to retrieve version(str): Plugin version to retrieve Returns nested dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries """ plugins = DictWithDotNotation() filtered_name = kwargs.get(self._key_attr, None) for key, val in self._items(type_filter, filtered_name): plugin_blacklist = None skip = False if blacklist: # Assume blacklist is correct format since it is checked by PluginLoade plugin_blacklist = [] for entry in blacklist: if getattr(entry, self._key_attr) not in (key, None): continue if all(getattr(entry, attr) is None for attr in self._bl_skip_attrs): if not self._skip_empty: plugins[key] = None if filtered_name else self._bl_empty() skip = True break plugin_blacklist.append(entry) if not skip: # pylint: disable=protected-access result = val._filter(plugin_blacklist, newest_only=newest_only, **kwargs) if result or not self._skip_empty: plugins[key] = result if filtered_name: return plugins.get(filtered_name, None) return plugins
python
def _filter(self, blacklist=None, newest_only=False, type_filter=None, **kwargs): """ Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned type(str): Plugin type to retrieve name(str): Plugin name to retrieve version(str): Plugin version to retrieve Returns nested dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries """ plugins = DictWithDotNotation() filtered_name = kwargs.get(self._key_attr, None) for key, val in self._items(type_filter, filtered_name): plugin_blacklist = None skip = False if blacklist: # Assume blacklist is correct format since it is checked by PluginLoade plugin_blacklist = [] for entry in blacklist: if getattr(entry, self._key_attr) not in (key, None): continue if all(getattr(entry, attr) is None for attr in self._bl_skip_attrs): if not self._skip_empty: plugins[key] = None if filtered_name else self._bl_empty() skip = True break plugin_blacklist.append(entry) if not skip: # pylint: disable=protected-access result = val._filter(plugin_blacklist, newest_only=newest_only, **kwargs) if result or not self._skip_empty: plugins[key] = result if filtered_name: return plugins.get(filtered_name, None) return plugins
[ "def", "_filter", "(", "self", ",", "blacklist", "=", "None", ",", "newest_only", "=", "False", ",", "type_filter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "plugins", "=", "DictWithDotNotation", "(", ")", "filtered_name", "=", "kwargs", ".", "get...
Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned type(str): Plugin type to retrieve name(str): Plugin name to retrieve version(str): Plugin version to retrieve Returns nested dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries
[ "Args", ":", "blacklist", "(", "tuple", ")", ":", "Iterable", "of", "of", "BlacklistEntry", "objects", "newest_only", "(", "bool", ")", ":", "Only", "the", "newest", "version", "of", "each", "plugin", "is", "returned", "type", "(", "str", ")", ":", "Plug...
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L119-L165
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
PluginDict._sorted_keys
def _sorted_keys(self): """ Return list of keys sorted by version Sorting is done based on :py:func:`pkg_resources.parse_version` """ try: keys = self._cache['sorted_keys'] except KeyError: keys = self._cache['sorted_keys'] = sorted(self.keys(), key=parse_version) return keys
python
def _sorted_keys(self): """ Return list of keys sorted by version Sorting is done based on :py:func:`pkg_resources.parse_version` """ try: keys = self._cache['sorted_keys'] except KeyError: keys = self._cache['sorted_keys'] = sorted(self.keys(), key=parse_version) return keys
[ "def", "_sorted_keys", "(", "self", ")", ":", "try", ":", "keys", "=", "self", ".", "_cache", "[", "'sorted_keys'", "]", "except", "KeyError", ":", "keys", "=", "self", ".", "_cache", "[", "'sorted_keys'", "]", "=", "sorted", "(", "self", ".", "keys", ...
Return list of keys sorted by version Sorting is done based on :py:func:`pkg_resources.parse_version`
[ "Return", "list", "of", "keys", "sorted", "by", "version" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L188-L200
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
PluginDict._process_blacklist
def _process_blacklist(self, blacklist): """ Process blacklist into set of excluded versions """ # Assume blacklist is correct format since it is checked by PluginLoader blacklist_cache = {} blacklist_cache_old = self._cache.get('blacklist', {}) for entry in blacklist: blackkey = (entry.version, entry.operator) if blackkey in blacklist_cache: continue elif blackkey in blacklist_cache_old: blacklist_cache[blackkey] = blacklist_cache_old[blackkey] else: entry_cache = blacklist_cache[blackkey] = set() blackversion = parse_version(entry.version or '0') blackop = OPERATORS[entry.operator] for key in self: if blackop(parse_version(key), blackversion): entry_cache.add(key) self._cache['blacklist'] = blacklist_cache return set().union(*blacklist_cache.values())
python
def _process_blacklist(self, blacklist): """ Process blacklist into set of excluded versions """ # Assume blacklist is correct format since it is checked by PluginLoader blacklist_cache = {} blacklist_cache_old = self._cache.get('blacklist', {}) for entry in blacklist: blackkey = (entry.version, entry.operator) if blackkey in blacklist_cache: continue elif blackkey in blacklist_cache_old: blacklist_cache[blackkey] = blacklist_cache_old[blackkey] else: entry_cache = blacklist_cache[blackkey] = set() blackversion = parse_version(entry.version or '0') blackop = OPERATORS[entry.operator] for key in self: if blackop(parse_version(key), blackversion): entry_cache.add(key) self._cache['blacklist'] = blacklist_cache return set().union(*blacklist_cache.values())
[ "def", "_process_blacklist", "(", "self", ",", "blacklist", ")", ":", "# Assume blacklist is correct format since it is checked by PluginLoader", "blacklist_cache", "=", "{", "}", "blacklist_cache_old", "=", "self", ".", "_cache", ".", "get", "(", "'blacklist'", ",", "{...
Process blacklist into set of excluded versions
[ "Process", "blacklist", "into", "set", "of", "excluded", "versions" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L202-L230
Rockhopper-Technologies/pluginlib
pluginlib/_objects.py
PluginDict._filter
def _filter(self, blacklist=None, newest_only=False, **kwargs): """ Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned version(str): Specific version to retrieve Returns dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries """ version = kwargs.get('version', None) rtn = None if self: # Dict is not empty if blacklist: blacklist = self._process_blacklist(blacklist) if version: if version not in blacklist: rtn = self.get(version, None) elif newest_only: for key in reversed(self._sorted_keys()): if key not in blacklist: rtn = self[key] break # If no keys are left, None will be returned else: rtn = dict((key, val) for key, val in self.items() if key not in blacklist) \ or None elif version: rtn = self.get(version, None) elif newest_only: rtn = self[self._sorted_keys()[-1]] else: rtn = dict(self) return rtn
python
def _filter(self, blacklist=None, newest_only=False, **kwargs): """ Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned version(str): Specific version to retrieve Returns dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries """ version = kwargs.get('version', None) rtn = None if self: # Dict is not empty if blacklist: blacklist = self._process_blacklist(blacklist) if version: if version not in blacklist: rtn = self.get(version, None) elif newest_only: for key in reversed(self._sorted_keys()): if key not in blacklist: rtn = self[key] break # If no keys are left, None will be returned else: rtn = dict((key, val) for key, val in self.items() if key not in blacklist) \ or None elif version: rtn = self.get(version, None) elif newest_only: rtn = self[self._sorted_keys()[-1]] else: rtn = dict(self) return rtn
[ "def", "_filter", "(", "self", ",", "blacklist", "=", "None", ",", "newest_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "version", "=", "kwargs", ".", "get", "(", "'version'", ",", "None", ")", "rtn", "=", "None", "if", "self", ":", "# D...
Args: blacklist(tuple): Iterable of of BlacklistEntry objects newest_only(bool): Only the newest version of each plugin is returned version(str): Specific version to retrieve Returns dictionary of plugins If a blacklist is supplied, plugins are evaluated against the blacklist entries
[ "Args", ":", "blacklist", "(", "tuple", ")", ":", "Iterable", "of", "of", "BlacklistEntry", "objects", "newest_only", "(", "bool", ")", ":", "Only", "the", "newest", "version", "of", "each", "plugin", "is", "returned", "version", "(", "str", ")", ":", "S...
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_objects.py#L232-L276
fmalina/bng_latlon
bng_to_latlon.py
OSGB36toWGS84
def OSGB36toWGS84(E, N): """ Accept The Ordnance Survey National Grid eastings and northings. Return latitude and longitude coordinates. Usage: >>> from bng_to_latlon import OSGB36toWGS84 >>> OSGB36toWGS84(538890, 177320) (51.47779538331092, -0.0014016837826672265) >>> OSGB36toWGS84(352500.2, 401400) (53.507129843104195, -2.7176599627343263) """ # The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m) a, b = 6377563.396, 6356256.909 F0 = 0.9996012717 # scale factor on the central meridian # Latitude and longtitude of true origin (radians) lat0 = 49*pi/180 lon0 = -2*pi/180 # longtitude of central meridian # Northing & easting of true origin (m) N0, E0 = -100000, 400000 e2 = 1 - (b*b)/(a*a) # eccentricity squared n = (a-b)/(a+b) # Initialise the iterative variables lat, M = lat0, 0 while N-N0-M >= 0.00001: # Accurate to 0.01mm lat = (N-N0-M)/(a*F0) + lat M1 = (1 + n + (5./4)*n**2 + (5./4)*n**3) * (lat-lat0) M2 = (3*n + 3*n**2 + (21./8)*n**3) * sin(lat-lat0) * cos(lat+lat0) M3 = ((15./8)*n**2 + (15./8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0)) M4 = (35./24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0)) # meridional arc M = b * F0 * (M1 - M2 + M3 - M4) # transverse radius of curvature nu = a*F0/sqrt(1-e2*sin(lat)**2) # meridional radius of curvature rho = a*F0*(1-e2)*(1-e2*sin(lat)**2)**(-1.5) eta2 = nu/rho-1 sec_lat = 1./cos(lat) VII = tan(lat)/(2*rho*nu) VIII = tan(lat)/(24*rho*nu**3)*(5+3*tan(lat)**2+eta2-9*tan(lat)**2*eta2) IX = tan(lat)/(720*rho*nu**5)*(61+90*tan(lat)**2+45*tan(lat)**4) X = sec_lat/nu XI = sec_lat/(6*nu**3)*(nu/rho+2*tan(lat)**2) XII = sec_lat/(120*nu**5)*(5+28*tan(lat)**2+24*tan(lat)**4) XIIA = sec_lat/(5040*nu**7)*(61+662*tan(lat)**2+1320*tan(lat)**4+720*tan(lat)**6) dE = E-E0 # These are on the wrong ellipsoid currently: Airy 1830 (denoted by _1) lat_1 = lat - VII*dE**2 + VIII*dE**4 - IX*dE**6 lon_1 = lon0 + X*dE - XI*dE**3 + XII*dE**5 - XIIA*dE**7 # Want to convert to the GRS80 ellipsoid. 
# First convert to cartesian from spherical polar coordinates H = 0 # Third spherical coord. x_1 = (nu/F0 + H)*cos(lat_1)*cos(lon_1) y_1 = (nu/F0 + H)*cos(lat_1)*sin(lon_1) z_1 = ((1-e2)*nu/F0 + H)*sin(lat_1) # Perform Helmut transform (to go between Airy 1830 (_1) and GRS80 (_2)) s = -20.4894*10**-6 # The scale factor -1 # The translations along x, y, z axes respectively tx, ty, tz = 446.448, -125.157, + 542.060 # The rotations along x, y, z respectively (in seconds) rxs, rys, rzs = 0.1502, 0.2470, 0.8421 # convert seconds to radians def sec_to_rad(x): return x*pi/(180*3600.) rx, ry, rz = [sec_to_rad(x) for x in (rxs, rys, rzs)] # (in radians) x_2 = tx + (1+s)*x_1 + (-rz)*y_1 + (ry)*z_1 y_2 = ty + (rz)*x_1 + (1+s)*y_1 + (-rx)*z_1 z_2 = tz + (-ry)*x_1 + (rx)*y_1 + (1+s)*z_1 # Back to spherical polar coordinates from cartesian # Need some of the characteristics of the new ellipsoid # The GSR80 semi-major and semi-minor axes used for WGS84(m) a_2, b_2 = 6378137.000, 6356752.3141 e2_2 = 1 - (b_2*b_2)/(a_2*a_2) # The eccentricity of the GRS80 ellipsoid p = sqrt(x_2**2 + y_2**2) # Lat is obtained by an iterative proceedure: lat = atan2(z_2, (p*(1-e2_2))) # Initial value latold = 2*pi while abs(lat - latold) > 10**-16: lat, latold = latold, lat nu_2 = a_2/sqrt(1-e2_2*sin(latold)**2) lat = atan2(z_2+e2_2*nu_2*sin(latold), p) # Lon and height are then pretty easy lon = atan2(y_2, x_2) H = p/cos(lat) - nu_2 # Uncomment this line if you want to print the results # print([(lat-lat_1)*180/pi, (lon - lon_1)*180/pi]) # Convert to degrees lat = lat*180/pi lon = lon*180/pi # Job's a good'n. return lat, lon
python
def OSGB36toWGS84(E, N): """ Accept The Ordnance Survey National Grid eastings and northings. Return latitude and longitude coordinates. Usage: >>> from bng_to_latlon import OSGB36toWGS84 >>> OSGB36toWGS84(538890, 177320) (51.47779538331092, -0.0014016837826672265) >>> OSGB36toWGS84(352500.2, 401400) (53.507129843104195, -2.7176599627343263) """ # The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m) a, b = 6377563.396, 6356256.909 F0 = 0.9996012717 # scale factor on the central meridian # Latitude and longtitude of true origin (radians) lat0 = 49*pi/180 lon0 = -2*pi/180 # longtitude of central meridian # Northing & easting of true origin (m) N0, E0 = -100000, 400000 e2 = 1 - (b*b)/(a*a) # eccentricity squared n = (a-b)/(a+b) # Initialise the iterative variables lat, M = lat0, 0 while N-N0-M >= 0.00001: # Accurate to 0.01mm lat = (N-N0-M)/(a*F0) + lat M1 = (1 + n + (5./4)*n**2 + (5./4)*n**3) * (lat-lat0) M2 = (3*n + 3*n**2 + (21./8)*n**3) * sin(lat-lat0) * cos(lat+lat0) M3 = ((15./8)*n**2 + (15./8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0)) M4 = (35./24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0)) # meridional arc M = b * F0 * (M1 - M2 + M3 - M4) # transverse radius of curvature nu = a*F0/sqrt(1-e2*sin(lat)**2) # meridional radius of curvature rho = a*F0*(1-e2)*(1-e2*sin(lat)**2)**(-1.5) eta2 = nu/rho-1 sec_lat = 1./cos(lat) VII = tan(lat)/(2*rho*nu) VIII = tan(lat)/(24*rho*nu**3)*(5+3*tan(lat)**2+eta2-9*tan(lat)**2*eta2) IX = tan(lat)/(720*rho*nu**5)*(61+90*tan(lat)**2+45*tan(lat)**4) X = sec_lat/nu XI = sec_lat/(6*nu**3)*(nu/rho+2*tan(lat)**2) XII = sec_lat/(120*nu**5)*(5+28*tan(lat)**2+24*tan(lat)**4) XIIA = sec_lat/(5040*nu**7)*(61+662*tan(lat)**2+1320*tan(lat)**4+720*tan(lat)**6) dE = E-E0 # These are on the wrong ellipsoid currently: Airy 1830 (denoted by _1) lat_1 = lat - VII*dE**2 + VIII*dE**4 - IX*dE**6 lon_1 = lon0 + X*dE - XI*dE**3 + XII*dE**5 - XIIA*dE**7 # Want to convert to the GRS80 ellipsoid. 
# First convert to cartesian from spherical polar coordinates H = 0 # Third spherical coord. x_1 = (nu/F0 + H)*cos(lat_1)*cos(lon_1) y_1 = (nu/F0 + H)*cos(lat_1)*sin(lon_1) z_1 = ((1-e2)*nu/F0 + H)*sin(lat_1) # Perform Helmut transform (to go between Airy 1830 (_1) and GRS80 (_2)) s = -20.4894*10**-6 # The scale factor -1 # The translations along x, y, z axes respectively tx, ty, tz = 446.448, -125.157, + 542.060 # The rotations along x, y, z respectively (in seconds) rxs, rys, rzs = 0.1502, 0.2470, 0.8421 # convert seconds to radians def sec_to_rad(x): return x*pi/(180*3600.) rx, ry, rz = [sec_to_rad(x) for x in (rxs, rys, rzs)] # (in radians) x_2 = tx + (1+s)*x_1 + (-rz)*y_1 + (ry)*z_1 y_2 = ty + (rz)*x_1 + (1+s)*y_1 + (-rx)*z_1 z_2 = tz + (-ry)*x_1 + (rx)*y_1 + (1+s)*z_1 # Back to spherical polar coordinates from cartesian # Need some of the characteristics of the new ellipsoid # The GSR80 semi-major and semi-minor axes used for WGS84(m) a_2, b_2 = 6378137.000, 6356752.3141 e2_2 = 1 - (b_2*b_2)/(a_2*a_2) # The eccentricity of the GRS80 ellipsoid p = sqrt(x_2**2 + y_2**2) # Lat is obtained by an iterative proceedure: lat = atan2(z_2, (p*(1-e2_2))) # Initial value latold = 2*pi while abs(lat - latold) > 10**-16: lat, latold = latold, lat nu_2 = a_2/sqrt(1-e2_2*sin(latold)**2) lat = atan2(z_2+e2_2*nu_2*sin(latold), p) # Lon and height are then pretty easy lon = atan2(y_2, x_2) H = p/cos(lat) - nu_2 # Uncomment this line if you want to print the results # print([(lat-lat_1)*180/pi, (lon - lon_1)*180/pi]) # Convert to degrees lat = lat*180/pi lon = lon*180/pi # Job's a good'n. return lat, lon
[ "def", "OSGB36toWGS84", "(", "E", ",", "N", ")", ":", "# The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)", "a", ",", "b", "=", "6377563.396", ",", "6356256.909", "F0", "=", "0.9996012717", "# scale factor on the central meridian", "# Latitude and longtitude o...
Accept The Ordnance Survey National Grid eastings and northings. Return latitude and longitude coordinates. Usage: >>> from bng_to_latlon import OSGB36toWGS84 >>> OSGB36toWGS84(538890, 177320) (51.47779538331092, -0.0014016837826672265) >>> OSGB36toWGS84(352500.2, 401400) (53.507129843104195, -2.7176599627343263)
[ "Accept", "The", "Ordnance", "Survey", "National", "Grid", "eastings", "and", "northings", ".", "Return", "latitude", "and", "longitude", "coordinates", "." ]
train
https://github.com/fmalina/bng_latlon/blob/b0251174b5248e8ade8098a31e754dcd0b157060/bng_to_latlon.py#L10-L117
jaraco/hgtools
hgtools/versioning.py
find
def find(pred, items): """ Find the index of the first element in items for which pred returns True >>> find(lambda x: x > 3, range(100)) 4 >>> find(lambda x: x < -3, range(100)) is None True """ for i, item in enumerate(items): if pred(item): return i
python
def find(pred, items): """ Find the index of the first element in items for which pred returns True >>> find(lambda x: x > 3, range(100)) 4 >>> find(lambda x: x < -3, range(100)) is None True """ for i, item in enumerate(items): if pred(item): return i
[ "def", "find", "(", "pred", ",", "items", ")", ":", "for", "i", ",", "item", "in", "enumerate", "(", "items", ")", ":", "if", "pred", "(", "item", ")", ":", "return", "i" ]
Find the index of the first element in items for which pred returns True >>> find(lambda x: x > 3, range(100)) 4 >>> find(lambda x: x < -3, range(100)) is None True
[ "Find", "the", "index", "of", "the", "first", "element", "in", "items", "for", "which", "pred", "returns", "True" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L11-L23
jaraco/hgtools
hgtools/versioning.py
SummableVersion.reset_less_significant
def reset_less_significant(self, significant_version): """ Reset to zero all version info less significant than the indicated version. >>> ver = SummableVersion('3.1.2') >>> ver.reset_less_significant(SummableVersion('0.1')) >>> str(ver) '3.1' """ def nonzero(x): return x != 0 version_len = 3 # strict versions are always a tuple of 3 significant_pos = rfind(nonzero, significant_version.version) significant_pos = version_len + significant_pos + 1 self.version = ( self.version[:significant_pos] + (0,) * (version_len - significant_pos))
python
def reset_less_significant(self, significant_version): """ Reset to zero all version info less significant than the indicated version. >>> ver = SummableVersion('3.1.2') >>> ver.reset_less_significant(SummableVersion('0.1')) >>> str(ver) '3.1' """ def nonzero(x): return x != 0 version_len = 3 # strict versions are always a tuple of 3 significant_pos = rfind(nonzero, significant_version.version) significant_pos = version_len + significant_pos + 1 self.version = ( self.version[:significant_pos] + (0,) * (version_len - significant_pos))
[ "def", "reset_less_significant", "(", "self", ",", "significant_version", ")", ":", "def", "nonzero", "(", "x", ")", ":", "return", "x", "!=", "0", "version_len", "=", "3", "# strict versions are always a tuple of 3", "significant_pos", "=", "rfind", "(", "nonzero...
Reset to zero all version info less significant than the indicated version. >>> ver = SummableVersion('3.1.2') >>> ver.reset_less_significant(SummableVersion('0.1')) >>> str(ver) '3.1'
[ "Reset", "to", "zero", "all", "version", "info", "less", "significant", "than", "the", "indicated", "version", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L51-L68
jaraco/hgtools
hgtools/versioning.py
SummableVersion.as_number
def as_number(self): """ >>> round(SummableVersion('1.9.3').as_number(), 12) 1.93 """ def combine(subver, ver): return subver / 10 + ver return reduce(combine, reversed(self.version))
python
def as_number(self): """ >>> round(SummableVersion('1.9.3').as_number(), 12) 1.93 """ def combine(subver, ver): return subver / 10 + ver return reduce(combine, reversed(self.version))
[ "def", "as_number", "(", "self", ")", ":", "def", "combine", "(", "subver", ",", "ver", ")", ":", "return", "subver", "/", "10", "+", "ver", "return", "reduce", "(", "combine", ",", "reversed", "(", "self", ".", "version", ")", ")" ]
>>> round(SummableVersion('1.9.3').as_number(), 12) 1.93
[ ">>>", "round", "(", "SummableVersion", "(", "1", ".", "9", ".", "3", ")", ".", "as_number", "()", "12", ")", "1", ".", "93" ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L70-L77
jaraco/hgtools
hgtools/versioning.py
VersionManagement.get_tagged_version
def get_tagged_version(self): """ Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset. """ tags = list(self.get_tags()) if 'tip' in tags and not self.is_modified(): tags = self.get_parent_tags('tip') versions = self.__versions_from_tags(tags) return self.__best_version(versions)
python
def get_tagged_version(self): """ Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset. """ tags = list(self.get_tags()) if 'tip' in tags and not self.is_modified(): tags = self.get_parent_tags('tip') versions = self.__versions_from_tags(tags) return self.__best_version(versions)
[ "def", "get_tagged_version", "(", "self", ")", ":", "tags", "=", "list", "(", "self", ".", "get_tags", "(", ")", ")", "if", "'tip'", "in", "tags", "and", "not", "self", ".", "is_modified", "(", ")", ":", "tags", "=", "self", ".", "get_parent_tags", "...
Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset.
[ "Get", "the", "version", "of", "the", "local", "working", "set", "as", "a", "StrictVersion", "or", "None", "if", "no", "viable", "tag", "exists", ".", "If", "the", "local", "working", "set", "is", "itself", "the", "tagged", "commit", "and", "the", "tip",...
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L111-L122
jaraco/hgtools
hgtools/versioning.py
VersionManagement.get_current_version
def get_current_version(self, increment=None): """ Return as a string the version of the current state of the repository -- a tagged version, if present, or the next version based on prior tagged releases. """ ver = ( self.get_tagged_version() or str(self.get_next_version(increment)) + '.dev0' ) return str(ver)
python
def get_current_version(self, increment=None): """ Return as a string the version of the current state of the repository -- a tagged version, if present, or the next version based on prior tagged releases. """ ver = ( self.get_tagged_version() or str(self.get_next_version(increment)) + '.dev0' ) return str(ver)
[ "def", "get_current_version", "(", "self", ",", "increment", "=", "None", ")", ":", "ver", "=", "(", "self", ".", "get_tagged_version", "(", ")", "or", "str", "(", "self", ".", "get_next_version", "(", "increment", ")", ")", "+", "'.dev0'", ")", "return"...
Return as a string the version of the current state of the repository -- a tagged version, if present, or the next version based on prior tagged releases.
[ "Return", "as", "a", "string", "the", "version", "of", "the", "current", "state", "of", "the", "repository", "--", "a", "tagged", "version", "if", "present", "or", "the", "next", "version", "based", "on", "prior", "tagged", "releases", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L131-L141
jaraco/hgtools
hgtools/versioning.py
VersionManagement.get_next_version
def get_next_version(self, increment=None): """ Return the next version based on prior tagged releases. """ increment = increment or self.increment return self.infer_next_version(self.get_latest_version(), increment)
python
def get_next_version(self, increment=None): """ Return the next version based on prior tagged releases. """ increment = increment or self.increment return self.infer_next_version(self.get_latest_version(), increment)
[ "def", "get_next_version", "(", "self", ",", "increment", "=", "None", ")", ":", "increment", "=", "increment", "or", "self", ".", "increment", "return", "self", ".", "infer_next_version", "(", "self", ".", "get_latest_version", "(", ")", ",", "increment", "...
Return the next version based on prior tagged releases.
[ "Return", "the", "next", "version", "based", "on", "prior", "tagged", "releases", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L143-L148
jaraco/hgtools
hgtools/versioning.py
VersionManagement.infer_next_version
def infer_next_version(last_version, increment): """ Given a simple application version (as a StrictVersion), and an increment (1.0, 0.1, or 0.0.1), guess the next version. Set up a shorthand for examples >>> def VM_infer(*params): ... return str(VersionManagement.infer_next_version(*params)) >>> VM_infer('3.2', '0.0.1') '3.2.1' >>> VM_infer(StrictVersion('3.2'), '0.0.1') '3.2.1' >>> VM_infer('3.2.3', '0.1') '3.3' >>> VM_infer('3.1.2', '1.0') '4.0' Subversions never increment parent versions >>> VM_infer('3.0.9', '0.0.1') '3.0.10' If it's a prerelease version, just remove the prerelease. >>> VM_infer('3.1a1', '0.0.1') '3.1' If there is no last version, use the increment itself >>> VM_infer(None, '0.1') '0.1' """ if last_version is None: return increment last_version = SummableVersion(str(last_version)) if last_version.prerelease: last_version.prerelease = None return str(last_version) increment = SummableVersion(increment) sum = last_version + increment sum.reset_less_significant(increment) return sum
python
def infer_next_version(last_version, increment): """ Given a simple application version (as a StrictVersion), and an increment (1.0, 0.1, or 0.0.1), guess the next version. Set up a shorthand for examples >>> def VM_infer(*params): ... return str(VersionManagement.infer_next_version(*params)) >>> VM_infer('3.2', '0.0.1') '3.2.1' >>> VM_infer(StrictVersion('3.2'), '0.0.1') '3.2.1' >>> VM_infer('3.2.3', '0.1') '3.3' >>> VM_infer('3.1.2', '1.0') '4.0' Subversions never increment parent versions >>> VM_infer('3.0.9', '0.0.1') '3.0.10' If it's a prerelease version, just remove the prerelease. >>> VM_infer('3.1a1', '0.0.1') '3.1' If there is no last version, use the increment itself >>> VM_infer(None, '0.1') '0.1' """ if last_version is None: return increment last_version = SummableVersion(str(last_version)) if last_version.prerelease: last_version.prerelease = None return str(last_version) increment = SummableVersion(increment) sum = last_version + increment sum.reset_less_significant(increment) return sum
[ "def", "infer_next_version", "(", "last_version", ",", "increment", ")", ":", "if", "last_version", "is", "None", ":", "return", "increment", "last_version", "=", "SummableVersion", "(", "str", "(", "last_version", ")", ")", "if", "last_version", ".", "prereleas...
Given a simple application version (as a StrictVersion), and an increment (1.0, 0.1, or 0.0.1), guess the next version. Set up a shorthand for examples >>> def VM_infer(*params): ... return str(VersionManagement.infer_next_version(*params)) >>> VM_infer('3.2', '0.0.1') '3.2.1' >>> VM_infer(StrictVersion('3.2'), '0.0.1') '3.2.1' >>> VM_infer('3.2.3', '0.1') '3.3' >>> VM_infer('3.1.2', '1.0') '4.0' Subversions never increment parent versions >>> VM_infer('3.0.9', '0.0.1') '3.0.10' If it's a prerelease version, just remove the prerelease. >>> VM_infer('3.1a1', '0.0.1') '3.1' If there is no last version, use the increment itself >>> VM_infer(None, '0.1') '0.1'
[ "Given", "a", "simple", "application", "version", "(", "as", "a", "StrictVersion", ")", "and", "an", "increment", "(", "1", ".", "0", "0", ".", "1", "or", "0", ".", "0", ".", "1", ")", "guess", "the", "next", "version", "." ]
train
https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/versioning.py#L151-L194
PGower/PyCanvas
pycanvas/apis/account_notifications.py
AccountNotificationsAPI.create_global_notification
def create_global_notification(self, account_id, account_notification_end_at, account_notification_subject, account_notification_message, account_notification_start_at, account_notification_icon=None, account_notification_roles=None): """ Create a global notification. Create and return a new global notification for an account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - account_notification[subject] """The subject of the notification.""" data["account_notification[subject]"] = account_notification_subject # REQUIRED - account_notification[message] """The message body of the notification.""" data["account_notification[message]"] = account_notification_message # REQUIRED - account_notification[start_at] """The start date and time of the notification in ISO8601 format. e.g. 2014-01-01T01:00Z""" data["account_notification[start_at]"] = account_notification_start_at # REQUIRED - account_notification[end_at] """The end date and time of the notification in ISO8601 format. e.g. 2014-01-01T01:00Z""" data["account_notification[end_at]"] = account_notification_end_at # OPTIONAL - account_notification[icon] """The icon to display with the notification. Note: Defaults to warning.""" if account_notification_icon is not None: self._validate_enum(account_notification_icon, ["warning", "information", "question", "error", "calendar"]) data["account_notification[icon]"] = account_notification_icon # OPTIONAL - account_notification_roles """The role(s) to send global notification to. 
Note: ommitting this field will send to everyone Example: account_notification_roles: ["StudentEnrollment", "TeacherEnrollment"]""" if account_notification_roles is not None: data["account_notification_roles"] = account_notification_roles self.logger.debug("POST /api/v1/accounts/{account_id}/account_notifications with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/accounts/{account_id}/account_notifications".format(**path), data=data, params=params, no_data=True)
python
def create_global_notification(self, account_id, account_notification_end_at, account_notification_subject, account_notification_message, account_notification_start_at, account_notification_icon=None, account_notification_roles=None): """ Create a global notification. Create and return a new global notification for an account. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - account_notification[subject] """The subject of the notification.""" data["account_notification[subject]"] = account_notification_subject # REQUIRED - account_notification[message] """The message body of the notification.""" data["account_notification[message]"] = account_notification_message # REQUIRED - account_notification[start_at] """The start date and time of the notification in ISO8601 format. e.g. 2014-01-01T01:00Z""" data["account_notification[start_at]"] = account_notification_start_at # REQUIRED - account_notification[end_at] """The end date and time of the notification in ISO8601 format. e.g. 2014-01-01T01:00Z""" data["account_notification[end_at]"] = account_notification_end_at # OPTIONAL - account_notification[icon] """The icon to display with the notification. Note: Defaults to warning.""" if account_notification_icon is not None: self._validate_enum(account_notification_icon, ["warning", "information", "question", "error", "calendar"]) data["account_notification[icon]"] = account_notification_icon # OPTIONAL - account_notification_roles """The role(s) to send global notification to. 
Note: ommitting this field will send to everyone Example: account_notification_roles: ["StudentEnrollment", "TeacherEnrollment"]""" if account_notification_roles is not None: data["account_notification_roles"] = account_notification_roles self.logger.debug("POST /api/v1/accounts/{account_id}/account_notifications with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/accounts/{account_id}/account_notifications".format(**path), data=data, params=params, no_data=True)
[ "def", "create_global_notification", "(", "self", ",", "account_id", ",", "account_notification_end_at", ",", "account_notification_subject", ",", "account_notification_message", ",", "account_notification_start_at", ",", "account_notification_icon", "=", "None", ",", "account_...
Create a global notification. Create and return a new global notification for an account.
[ "Create", "a", "global", "notification", ".", "Create", "and", "return", "a", "new", "global", "notification", "for", "an", "account", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/account_notifications.py#L92-L139
anomaly/prestans
prestans/ext/data/adapters/__init__.py
adapt_persistent_instance
def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None): """ Adapts a single persistent instance to a REST model; at present this is a common method for all persistent backends. Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as for discussion on this feature """ # try and get the adapter and the REST class for the persistent object if target_rest_class is None: adapter_instance = registry.get_adapter_for_persistent_model(persistent_object) else: if inspect.isclass(target_rest_class): target_rest_class = target_rest_class() adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class) # would raise an exception if the attribute_filter differs from the target_rest_class if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter): parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter) # convert filter to immutable if it isn't already if isinstance(attribute_filter, parser.AttributeFilter): attribute_filter = attribute_filter.as_immutable() return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter)
python
def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None): """ Adapts a single persistent instance to a REST model; at present this is a common method for all persistent backends. Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as for discussion on this feature """ # try and get the adapter and the REST class for the persistent object if target_rest_class is None: adapter_instance = registry.get_adapter_for_persistent_model(persistent_object) else: if inspect.isclass(target_rest_class): target_rest_class = target_rest_class() adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class) # would raise an exception if the attribute_filter differs from the target_rest_class if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter): parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter) # convert filter to immutable if it isn't already if isinstance(attribute_filter, parser.AttributeFilter): attribute_filter = attribute_filter.as_immutable() return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter)
[ "def", "adapt_persistent_instance", "(", "persistent_object", ",", "target_rest_class", "=", "None", ",", "attribute_filter", "=", "None", ")", ":", "# try and get the adapter and the REST class for the persistent object", "if", "target_rest_class", "is", "None", ":", "adapte...
Adapts a single persistent instance to a REST model; at present this is a common method for all persistent backends. Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as for discussion on this feature
[ "Adapts", "a", "single", "persistent", "instance", "to", "a", "REST", "model", ";", "at", "present", "this", "is", "a", "common", "method", "for", "all", "persistent", "backends", "." ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/ext/data/adapters/__init__.py#L167-L193
anomaly/prestans
prestans/ext/data/adapters/__init__.py
ModelAdapter.adapt_persistent_to_rest
def adapt_persistent_to_rest(self, persistent_object, attribute_filter=None): """ adapts a persistent model to a rest model by inspecting """ # convert filter to immutable if it isn't already if isinstance(attribute_filter, parser.AttributeFilter): attribute_filter = attribute_filter.as_immutable() rest_model_instance = self.rest_model_class() for attribute_key in rest_model_instance.get_attribute_keys(): # attribute is not visible don't bother processing if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \ not attribute_filter.is_attribute_visible(attribute_key): continue rest_attr = getattr(self.rest_model_class, attribute_key) # don't bother processing if the persistent model doesn't have this attribute if not hasattr(persistent_object, attribute_key): if isinstance(rest_attr, types.Model): #: If the attribute is a Model, then we set it to None otherwise we get a model #: with default values, which is invalid when constructing responses try: setattr(rest_model_instance, attribute_key, None) # catch any exception thrown from setattr to give a usable error message except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) continue # ignore class methods elif inspect.ismethod(getattr(persistent_object, attribute_key)): import logging logging.error("ignoring method: "+attribute_key) continue # handles prestans array population from SQLAlchemy relationships elif isinstance(rest_attr, types.Array): persistent_attr_value = getattr(persistent_object, attribute_key) rest_model_array_handle = getattr(rest_model_instance, attribute_key) # iterator uses the .append method exposed by prestans arrays to validate # and populate the collection in the instance. 
for collection_element in persistent_attr_value: if rest_attr.is_scalar: rest_model_array_handle.append(collection_element) else: element_adapter = registry.get_adapter_for_rest_model(rest_attr.element_template) # check if there is a sub model filter sub_attribute_filter = None if attribute_filter and attribute_key in attribute_filter: sub_attribute_filter = getattr(attribute_filter, attribute_key) adapted_rest_model = element_adapter.adapt_persistent_to_rest( collection_element, sub_attribute_filter ) rest_model_array_handle.append(adapted_rest_model) elif isinstance(rest_attr, types.Model): try: persistent_attr_value = getattr(persistent_object, attribute_key) if persistent_attr_value is None: adapted_rest_model = None else: model_adapter = registry.get_adapter_for_rest_model(rest_attr) # check if there is a sub model filter sub_attribute_filter = None if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \ attribute_key in attribute_filter: sub_attribute_filter = getattr(attribute_filter, attribute_key) adapted_rest_model = model_adapter.adapt_persistent_to_rest( persistent_attr_value, sub_attribute_filter ) setattr(rest_model_instance, attribute_key, adapted_rest_model) except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) except exception.DataValidationException as exp: raise exception.InconsistentPersistentDataError(attribute_key, str(exp)) else: # otherwise copy the value to the rest model try: persistent_attr_value = getattr(persistent_object, attribute_key) setattr(rest_model_instance, attribute_key, persistent_attr_value) except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) except exception.ValidationError as exp: raise exception.InconsistentPersistentDataError(attribute_key, str(exp)) return rest_model_instance
python
def adapt_persistent_to_rest(self, persistent_object, attribute_filter=None): """ adapts a persistent model to a rest model by inspecting """ # convert filter to immutable if it isn't already if isinstance(attribute_filter, parser.AttributeFilter): attribute_filter = attribute_filter.as_immutable() rest_model_instance = self.rest_model_class() for attribute_key in rest_model_instance.get_attribute_keys(): # attribute is not visible don't bother processing if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \ not attribute_filter.is_attribute_visible(attribute_key): continue rest_attr = getattr(self.rest_model_class, attribute_key) # don't bother processing if the persistent model doesn't have this attribute if not hasattr(persistent_object, attribute_key): if isinstance(rest_attr, types.Model): #: If the attribute is a Model, then we set it to None otherwise we get a model #: with default values, which is invalid when constructing responses try: setattr(rest_model_instance, attribute_key, None) # catch any exception thrown from setattr to give a usable error message except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) continue # ignore class methods elif inspect.ismethod(getattr(persistent_object, attribute_key)): import logging logging.error("ignoring method: "+attribute_key) continue # handles prestans array population from SQLAlchemy relationships elif isinstance(rest_attr, types.Array): persistent_attr_value = getattr(persistent_object, attribute_key) rest_model_array_handle = getattr(rest_model_instance, attribute_key) # iterator uses the .append method exposed by prestans arrays to validate # and populate the collection in the instance. 
for collection_element in persistent_attr_value: if rest_attr.is_scalar: rest_model_array_handle.append(collection_element) else: element_adapter = registry.get_adapter_for_rest_model(rest_attr.element_template) # check if there is a sub model filter sub_attribute_filter = None if attribute_filter and attribute_key in attribute_filter: sub_attribute_filter = getattr(attribute_filter, attribute_key) adapted_rest_model = element_adapter.adapt_persistent_to_rest( collection_element, sub_attribute_filter ) rest_model_array_handle.append(adapted_rest_model) elif isinstance(rest_attr, types.Model): try: persistent_attr_value = getattr(persistent_object, attribute_key) if persistent_attr_value is None: adapted_rest_model = None else: model_adapter = registry.get_adapter_for_rest_model(rest_attr) # check if there is a sub model filter sub_attribute_filter = None if isinstance(attribute_filter, (parser.AttributeFilter, parser.AttributeFilterImmutable)) and \ attribute_key in attribute_filter: sub_attribute_filter = getattr(attribute_filter, attribute_key) adapted_rest_model = model_adapter.adapt_persistent_to_rest( persistent_attr_value, sub_attribute_filter ) setattr(rest_model_instance, attribute_key, adapted_rest_model) except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) except exception.DataValidationException as exp: raise exception.InconsistentPersistentDataError(attribute_key, str(exp)) else: # otherwise copy the value to the rest model try: persistent_attr_value = getattr(persistent_object, attribute_key) setattr(rest_model_instance, attribute_key, persistent_attr_value) except TypeError as exp: raise TypeError('Attribute %s, %s' % (attribute_key, str(exp))) except exception.ValidationError as exp: raise exception.InconsistentPersistentDataError(attribute_key, str(exp)) return rest_model_instance
[ "def", "adapt_persistent_to_rest", "(", "self", ",", "persistent_object", ",", "attribute_filter", "=", "None", ")", ":", "# convert filter to immutable if it isn't already", "if", "isinstance", "(", "attribute_filter", ",", "parser", ".", "AttributeFilter", ")", ":", "...
adapts a persistent model to a rest model by inspecting
[ "adapts", "a", "persistent", "model", "to", "a", "rest", "model", "by", "inspecting" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/ext/data/adapters/__init__.py#L62-L164
anomaly/prestans
prestans/ext/data/adapters/__init__.py
AdapterRegistryManager.register_persistent_rest_pair
def register_persistent_rest_pair(self, persistent_model_class, rest_model_class): """ :param persistent_model_class: :param rest_model_class: """ self.register_adapter(ModelAdapter( rest_model_class=rest_model_class, persistent_model_class=persistent_model_class ))
python
def register_persistent_rest_pair(self, persistent_model_class, rest_model_class): """ :param persistent_model_class: :param rest_model_class: """ self.register_adapter(ModelAdapter( rest_model_class=rest_model_class, persistent_model_class=persistent_model_class ))
[ "def", "register_persistent_rest_pair", "(", "self", ",", "persistent_model_class", ",", "rest_model_class", ")", ":", "self", ".", "register_adapter", "(", "ModelAdapter", "(", "rest_model_class", "=", "rest_model_class", ",", "persistent_model_class", "=", "persistent_m...
:param persistent_model_class: :param rest_model_class:
[ ":", "param", "persistent_model_class", ":", ":", "param", "rest_model_class", ":" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/ext/data/adapters/__init__.py#L284-L292
anomaly/prestans
prestans/ext/data/adapters/__init__.py
AdapterRegistryManager.get_adapter_for_persistent_model
def get_adapter_for_persistent_model(self, persistent_model, rest_model=None): """ :param persistent_model: instance of persistent model :param rest_model: specific REST model :return: the matching model adapter :rtype: ModelAdapter """ persistent_signature = self.generate_signature(persistent_model) if persistent_signature in self._persistent_map: sub_map = self._persistent_map[persistent_signature] # return the first match if REST model was not specified if rest_model is None: return self._persistent_map[persistent_signature][self.DEFAULT_REST_ADAPTER] else: rest_sig = self.generate_signature(rest_model) if rest_sig in sub_map: return self._persistent_map[persistent_signature][rest_sig] raise TypeError("No registered Data Adapter for class %s" % persistent_signature)
python
def get_adapter_for_persistent_model(self, persistent_model, rest_model=None): """ :param persistent_model: instance of persistent model :param rest_model: specific REST model :return: the matching model adapter :rtype: ModelAdapter """ persistent_signature = self.generate_signature(persistent_model) if persistent_signature in self._persistent_map: sub_map = self._persistent_map[persistent_signature] # return the first match if REST model was not specified if rest_model is None: return self._persistent_map[persistent_signature][self.DEFAULT_REST_ADAPTER] else: rest_sig = self.generate_signature(rest_model) if rest_sig in sub_map: return self._persistent_map[persistent_signature][rest_sig] raise TypeError("No registered Data Adapter for class %s" % persistent_signature)
[ "def", "get_adapter_for_persistent_model", "(", "self", ",", "persistent_model", ",", "rest_model", "=", "None", ")", ":", "persistent_signature", "=", "self", ".", "generate_signature", "(", "persistent_model", ")", "if", "persistent_signature", "in", "self", ".", ...
:param persistent_model: instance of persistent model :param rest_model: specific REST model :return: the matching model adapter :rtype: ModelAdapter
[ ":", "param", "persistent_model", ":", "instance", "of", "persistent", "model", ":", "param", "rest_model", ":", "specific", "REST", "model", ":", "return", ":", "the", "matching", "model", "adapter", ":", "rtype", ":", "ModelAdapter" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/ext/data/adapters/__init__.py#L301-L321
anomaly/prestans
prestans/ext/data/adapters/__init__.py
AdapterRegistryManager.get_adapter_for_rest_model
def get_adapter_for_rest_model(self, rest_model): """ :param rest_model: instance of REST model :return: the matching model adapter :rtype: ModelAdapter """ class_signature = self.generate_signature(rest_model) if class_signature not in self._rest_map: raise TypeError("No registered Data Adapter for class %s" % class_signature) return self._rest_map[class_signature]
python
def get_adapter_for_rest_model(self, rest_model): """ :param rest_model: instance of REST model :return: the matching model adapter :rtype: ModelAdapter """ class_signature = self.generate_signature(rest_model) if class_signature not in self._rest_map: raise TypeError("No registered Data Adapter for class %s" % class_signature) return self._rest_map[class_signature]
[ "def", "get_adapter_for_rest_model", "(", "self", ",", "rest_model", ")", ":", "class_signature", "=", "self", ".", "generate_signature", "(", "rest_model", ")", "if", "class_signature", "not", "in", "self", ".", "_rest_map", ":", "raise", "TypeError", "(", "\"N...
:param rest_model: instance of REST model :return: the matching model adapter :rtype: ModelAdapter
[ ":", "param", "rest_model", ":", "instance", "of", "REST", "model", ":", "return", ":", "the", "matching", "model", "adapter", ":", "rtype", ":", "ModelAdapter" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/ext/data/adapters/__init__.py#L323-L334
theonion/django-bulbs
bulbs/reading_list/popular.py
popular_content
def popular_content(**kwargs): """ Use the get_popular_ids() to retrieve trending content objects. Return recent content on failure. """ limit = kwargs.get("limit", DEFAULT_LIMIT) popular_ids = get_popular_ids(limit=limit) if not popular_ids: # Return most recent content return Content.search_objects.search().extra(size=limit) return Content.search_objects.search().filter(es_filter.Ids(values=popular_ids))
python
def popular_content(**kwargs): """ Use the get_popular_ids() to retrieve trending content objects. Return recent content on failure. """ limit = kwargs.get("limit", DEFAULT_LIMIT) popular_ids = get_popular_ids(limit=limit) if not popular_ids: # Return most recent content return Content.search_objects.search().extra(size=limit) return Content.search_objects.search().filter(es_filter.Ids(values=popular_ids))
[ "def", "popular_content", "(", "*", "*", "kwargs", ")", ":", "limit", "=", "kwargs", ".", "get", "(", "\"limit\"", ",", "DEFAULT_LIMIT", ")", "popular_ids", "=", "get_popular_ids", "(", "limit", "=", "limit", ")", "if", "not", "popular_ids", ":", "# Return...
Use the get_popular_ids() to retrieve trending content objects. Return recent content on failure.
[ "Use", "the", "get_popular_ids", "()", "to", "retrieve", "trending", "content", "objects", ".", "Return", "recent", "content", "on", "failure", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/popular.py#L25-L35
izacus/pysolarized
pysolarized/solr.py
Solr._send_solr_command
def _send_solr_command(self, core_url, json_command): """ Sends JSON string to Solr instance """ # Check document language and dispatch to correct core url = _get_url(core_url, "update") try: response = self.req_session.post(url, data=json_command, headers={'Content-Type': 'application/json'}) response.raise_for_status() except requests.RequestException as e: logger.error("Failed to send update to Solr endpoint [%s]: %s", core_url, e, exc_info=True) raise SolrException("Failed to send command to Solr [%s]: %s" % (core_url, e,)) return True
python
def _send_solr_command(self, core_url, json_command): """ Sends JSON string to Solr instance """ # Check document language and dispatch to correct core url = _get_url(core_url, "update") try: response = self.req_session.post(url, data=json_command, headers={'Content-Type': 'application/json'}) response.raise_for_status() except requests.RequestException as e: logger.error("Failed to send update to Solr endpoint [%s]: %s", core_url, e, exc_info=True) raise SolrException("Failed to send command to Solr [%s]: %s" % (core_url, e,)) return True
[ "def", "_send_solr_command", "(", "self", ",", "core_url", ",", "json_command", ")", ":", "# Check document language and dispatch to correct core", "url", "=", "_get_url", "(", "core_url", ",", "\"update\"", ")", "try", ":", "response", "=", "self", ".", "req_sessio...
Sends JSON string to Solr instance
[ "Sends", "JSON", "string", "to", "Solr", "instance" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L69-L82
izacus/pysolarized
pysolarized/solr.py
Solr.add
def add(self, documents, boost=None): """ Adds documents to Solr index documents - Single item or list of items to add """ if not isinstance(documents, list): documents = [documents] documents = [{'doc': d} for d in documents] if boost: for d in documents: d['boost'] = boost self._add_batch.extend(documents) if len(self._add_batch) > SOLR_ADD_BATCH: self._addFlushBatch()
python
def add(self, documents, boost=None): """ Adds documents to Solr index documents - Single item or list of items to add """ if not isinstance(documents, list): documents = [documents] documents = [{'doc': d} for d in documents] if boost: for d in documents: d['boost'] = boost self._add_batch.extend(documents) if len(self._add_batch) > SOLR_ADD_BATCH: self._addFlushBatch()
[ "def", "add", "(", "self", ",", "documents", ",", "boost", "=", "None", ")", ":", "if", "not", "isinstance", "(", "documents", ",", "list", ")", ":", "documents", "=", "[", "documents", "]", "documents", "=", "[", "{", "'doc'", ":", "d", "}", "for"...
Adds documents to Solr index documents - Single item or list of items to add
[ "Adds", "documents", "to", "Solr", "index", "documents", "-", "Single", "item", "or", "list", "of", "items", "to", "add" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L94-L110
izacus/pysolarized
pysolarized/solr.py
Solr._addFlushBatch
def _addFlushBatch(self): """ Sends all waiting documents to Solr """ if len(self._add_batch) > 0: language_batches = {} # Create command JSONs for each of language endpoints for lang in self.endpoints: # Append documents with languages without endpoint to default endpoint document_jsons = ["\"add\":" + json.dumps(data) for data in self._add_batch if data['doc'].get("language", self.default_endpoint) == lang or (lang == self.default_endpoint and not self.endpoints.has_key(data['doc'].get("language", None)))] command_json = "{" + ",".join(document_jsons) + "}" language_batches[lang] = command_json # Solr requires for documents to be sent in { "add" : { "doc" : {...} }, "add": { "doc" : { ... }, ... } # format which isn't possible with python dictionaries for lang in language_batches: self._send_solr_command(self.endpoints[lang], language_batches[lang]) self._add_batch = []
python
def _addFlushBatch(self): """ Sends all waiting documents to Solr """ if len(self._add_batch) > 0: language_batches = {} # Create command JSONs for each of language endpoints for lang in self.endpoints: # Append documents with languages without endpoint to default endpoint document_jsons = ["\"add\":" + json.dumps(data) for data in self._add_batch if data['doc'].get("language", self.default_endpoint) == lang or (lang == self.default_endpoint and not self.endpoints.has_key(data['doc'].get("language", None)))] command_json = "{" + ",".join(document_jsons) + "}" language_batches[lang] = command_json # Solr requires for documents to be sent in { "add" : { "doc" : {...} }, "add": { "doc" : { ... }, ... } # format which isn't possible with python dictionaries for lang in language_batches: self._send_solr_command(self.endpoints[lang], language_batches[lang]) self._add_batch = []
[ "def", "_addFlushBatch", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_add_batch", ")", ">", "0", ":", "language_batches", "=", "{", "}", "# Create command JSONs for each of language endpoints", "for", "lang", "in", "self", ".", "endpoints", ":", "# ...
Sends all waiting documents to Solr
[ "Sends", "all", "waiting", "documents", "to", "Solr" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L112-L130
izacus/pysolarized
pysolarized/solr.py
Solr.deleteAll
def deleteAll(self): """ Deletes whole Solr index. Use with care. """ for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{\"delete\": { \"query\" : \"*:*\"}}")
python
def deleteAll(self): """ Deletes whole Solr index. Use with care. """ for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{\"delete\": { \"query\" : \"*:*\"}}")
[ "def", "deleteAll", "(", "self", ")", ":", "for", "core", "in", "self", ".", "endpoints", ":", "self", ".", "_send_solr_command", "(", "self", ".", "endpoints", "[", "core", "]", ",", "\"{\\\"delete\\\": { \\\"query\\\" : \\\"*:*\\\"}}\"", ")" ]
Deletes whole Solr index. Use with care.
[ "Deletes", "whole", "Solr", "index", ".", "Use", "with", "care", "." ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L132-L137
izacus/pysolarized
pysolarized/solr.py
Solr.delete
def delete(self, id): """ Deletes document with ID on all Solr cores """ for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{\"delete\" : { \"id\" : \"%s\"}}" % (id,))
python
def delete(self, id): """ Deletes document with ID on all Solr cores """ for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{\"delete\" : { \"id\" : \"%s\"}}" % (id,))
[ "def", "delete", "(", "self", ",", "id", ")", ":", "for", "core", "in", "self", ".", "endpoints", ":", "self", ".", "_send_solr_command", "(", "self", ".", "endpoints", "[", "core", "]", ",", "\"{\\\"delete\\\" : { \\\"id\\\" : \\\"%s\\\"}}\"", "%", "(", "id...
Deletes document with ID on all Solr cores
[ "Deletes", "document", "with", "ID", "on", "all", "Solr", "cores" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L139-L144
izacus/pysolarized
pysolarized/solr.py
Solr.commit
def commit(self): """ Flushes all pending changes and commits Solr changes """ self._addFlushBatch() for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{ \"commit\":{} }")
python
def commit(self): """ Flushes all pending changes and commits Solr changes """ self._addFlushBatch() for core in self.endpoints: self._send_solr_command(self.endpoints[core], "{ \"commit\":{} }")
[ "def", "commit", "(", "self", ")", ":", "self", ".", "_addFlushBatch", "(", ")", "for", "core", "in", "self", ".", "endpoints", ":", "self", ".", "_send_solr_command", "(", "self", ".", "endpoints", "[", "core", "]", ",", "\"{ \\\"commit\\\":{} }\"", ")" ]
Flushes all pending changes and commits Solr changes
[ "Flushes", "all", "pending", "changes", "and", "commits", "Solr", "changes" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L146-L152
izacus/pysolarized
pysolarized/solr.py
Solr._get_shards
def _get_shards(self): """ Returns comma separated list of configured Solr cores """ if self._shards is None: endpoints = [] for endpoint in self.endpoints: # We need to remove and http:// prefixes from URLs url = urlparse.urlparse(self.endpoints[endpoint]) endpoints.append("/".join([url.netloc, url.path])) self._shards = ",".join(endpoints) return self._shards
python
def _get_shards(self): """ Returns comma separated list of configured Solr cores """ if self._shards is None: endpoints = [] for endpoint in self.endpoints: # We need to remove and http:// prefixes from URLs url = urlparse.urlparse(self.endpoints[endpoint]) endpoints.append("/".join([url.netloc, url.path])) self._shards = ",".join(endpoints) return self._shards
[ "def", "_get_shards", "(", "self", ")", ":", "if", "self", ".", "_shards", "is", "None", ":", "endpoints", "=", "[", "]", "for", "endpoint", "in", "self", ".", "endpoints", ":", "# We need to remove and http:// prefixes from URLs", "url", "=", "urlparse", ".",...
Returns comma separated list of configured Solr cores
[ "Returns", "comma", "separated", "list", "of", "configured", "Solr", "cores" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L158-L169
izacus/pysolarized
pysolarized/solr.py
Solr._parse_response
def _parse_response(self, results): """ Parses result dictionary into a SolrResults object """ dict_response = results.get("response") result_obj = SolrResults() result_obj.query_time = results.get("responseHeader").get("QTime", None) result_obj.results_count = dict_response.get("numFound", 0) result_obj.start_index = dict_response.get("start", 0) for doc in dict_response.get("docs", []): result_obj.documents.append(doc) # Process facets if "facet_counts" in results: facet_types = ["facet_fields", "facet_dates", "facet_ranges", "facet_queries"] for type in facet_types: assert type in results.get("facet_counts") items = results.get("facet_counts").get(type) for field, values in items.items(): result_obj.facets[field] = [] # Range facets have results in "counts" subkey and "between/after" on top level. Flatten this. if type == "facet_ranges": if not "counts" in values: continue for facet, value in values["counts"].items(): result_obj.facets[field].append((facet, value)) if "before" in values: result_obj.facets[field].append(("before", values["before"])) if "after" in values: result_obj.facets[field].append(("after", values["after"])) else: for facet, value in values.items(): # Date facets have metadata fields between the results, skip the params, keep "before" and "after" fields for other if type == "facet_dates" and \ (facet == "gap" or facet == "between" or facet == "start" or facet == "end"): continue result_obj.facets[field].append((facet, value)) # Process highlights if "highlighting" in results: for key, value in results.get("highlighting").items(): result_obj.highlights[key] = value return result_obj
python
def _parse_response(self, results): """ Parses result dictionary into a SolrResults object """ dict_response = results.get("response") result_obj = SolrResults() result_obj.query_time = results.get("responseHeader").get("QTime", None) result_obj.results_count = dict_response.get("numFound", 0) result_obj.start_index = dict_response.get("start", 0) for doc in dict_response.get("docs", []): result_obj.documents.append(doc) # Process facets if "facet_counts" in results: facet_types = ["facet_fields", "facet_dates", "facet_ranges", "facet_queries"] for type in facet_types: assert type in results.get("facet_counts") items = results.get("facet_counts").get(type) for field, values in items.items(): result_obj.facets[field] = [] # Range facets have results in "counts" subkey and "between/after" on top level. Flatten this. if type == "facet_ranges": if not "counts" in values: continue for facet, value in values["counts"].items(): result_obj.facets[field].append((facet, value)) if "before" in values: result_obj.facets[field].append(("before", values["before"])) if "after" in values: result_obj.facets[field].append(("after", values["after"])) else: for facet, value in values.items(): # Date facets have metadata fields between the results, skip the params, keep "before" and "after" fields for other if type == "facet_dates" and \ (facet == "gap" or facet == "between" or facet == "start" or facet == "end"): continue result_obj.facets[field].append((facet, value)) # Process highlights if "highlighting" in results: for key, value in results.get("highlighting").items(): result_obj.highlights[key] = value return result_obj
[ "def", "_parse_response", "(", "self", ",", "results", ")", ":", "dict_response", "=", "results", ".", "get", "(", "\"response\"", ")", "result_obj", "=", "SolrResults", "(", ")", "result_obj", ".", "query_time", "=", "results", ".", "get", "(", "\"responseH...
Parses result dictionary into a SolrResults object
[ "Parses", "result", "dictionary", "into", "a", "SolrResults", "object" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L171-L220
izacus/pysolarized
pysolarized/solr.py
Solr.query
def query(self, query, filters=None, columns=None, sort=None, start=0, rows=30): """ Queries Solr and returns results query - Text query to search for filters - dictionary of filters to apply when searching in form of { "field":"filter_value" } columns - columns to return, list of strings sort - list of fields to sort on in format of ["field asc", "field desc", ... ] start - start number of first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if not columns: columns = ["*", "score"] fields = {"q": query, "json.nl" :"map", # Return facets as JSON objects "fl": ",".join(columns), # Return score along with results "start": str(start), "rows": str(rows), "wt": "json"} # Use shards parameter only if there are several cores active if len(self.endpoints) > 1: fields["shards"] = self._get_shards() # Prepare filters if not filters is None: filter_list = [] for filter_field, value in filters.items(): filter_list.append("%s:%s" % (filter_field, value)) fields["fq"] = " AND ".join(filter_list) # Append sorting parameters if not sort is None: fields["sort"] = ",".join(sort) # Do request to Solr server to default endpoint (other cores will be queried with shard functionality) assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "select") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
python
def query(self, query, filters=None, columns=None, sort=None, start=0, rows=30): """ Queries Solr and returns results query - Text query to search for filters - dictionary of filters to apply when searching in form of { "field":"filter_value" } columns - columns to return, list of strings sort - list of fields to sort on in format of ["field asc", "field desc", ... ] start - start number of first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if not columns: columns = ["*", "score"] fields = {"q": query, "json.nl" :"map", # Return facets as JSON objects "fl": ",".join(columns), # Return score along with results "start": str(start), "rows": str(rows), "wt": "json"} # Use shards parameter only if there are several cores active if len(self.endpoints) > 1: fields["shards"] = self._get_shards() # Prepare filters if not filters is None: filter_list = [] for filter_field, value in filters.items(): filter_list.append("%s:%s" % (filter_field, value)) fields["fq"] = " AND ".join(filter_list) # Append sorting parameters if not sort is None: fields["sort"] = ",".join(sort) # Do request to Solr server to default endpoint (other cores will be queried with shard functionality) assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "select") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
[ "def", "query", "(", "self", ",", "query", ",", "filters", "=", "None", ",", "columns", "=", "None", ",", "sort", "=", "None", ",", "start", "=", "0", ",", "rows", "=", "30", ")", ":", "if", "not", "columns", ":", "columns", "=", "[", "\"*\"", ...
Queries Solr and returns results query - Text query to search for filters - dictionary of filters to apply when searching in form of { "field":"filter_value" } columns - columns to return, list of strings sort - list of fields to sort on in format of ["field asc", "field desc", ... ] start - start number of first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30)
[ "Queries", "Solr", "and", "returns", "results" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L222-L275
izacus/pysolarized
pysolarized/solr.py
Solr.more_like_this
def more_like_this(self, query, fields, columns=None, start=0, rows=30): """ Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if isinstance(fields, basestring): mlt_fields = fields else: mlt_fields = ",".join(fields) if columns is None: columns = ["*", "score"] fields = {'q' : query, 'json.nl': 'map', 'mlt.fl': mlt_fields, 'fl': ",".join(columns), 'start': str(start), 'rows': str(rows), 'wt': "json"} if len(self.endpoints) > 1: fields["shards"] = self._get_shards() assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "mlt") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
python
def more_like_this(self, query, fields, columns=None, start=0, rows=30): """ Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if isinstance(fields, basestring): mlt_fields = fields else: mlt_fields = ",".join(fields) if columns is None: columns = ["*", "score"] fields = {'q' : query, 'json.nl': 'map', 'mlt.fl': mlt_fields, 'fl': ",".join(columns), 'start': str(start), 'rows': str(rows), 'wt': "json"} if len(self.endpoints) > 1: fields["shards"] = self._get_shards() assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "mlt") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
[ "def", "more_like_this", "(", "self", ",", "query", ",", "fields", ",", "columns", "=", "None", ",", "start", "=", "0", ",", "rows", "=", "30", ")", ":", "if", "isinstance", "(", "fields", ",", "basestring", ")", ":", "mlt_fields", "=", "fields", "el...
Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30)
[ "Retrieves", "more", "like", "this", "results", "for", "a", "passed", "query", "document" ]
train
https://github.com/izacus/pysolarized/blob/d820e20a45b63e5b88b0421eb703037a8d7ad4a3/pysolarized/solr.py#L277-L321
LIVVkit/LIVVkit
livvkit/components/numerics.py
run_suite
def run_suite(case, config, summary): """ Run the full suite of numerics tests """ m = importlib.import_module(config['module']) m.set_up() config["name"] = case analysis_data = {} bundle = livvkit.numerics_model_module model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs") config["plot_dir"] = plot_dir functions.mkdir_p(plot_dir) model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) for mscale in sorted(model_cases): bscale = bench_cases[mscale] if mscale in bench_cases else [] for mproc in model_cases[mscale]: full_name = '-'.join([mscale, mproc]) bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep)) if mproc in bscale else "") mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep)) model_data = functions.find_file(mpath, "*" + config["output_ext"]) bench_data = functions.find_file(bpath, "*" + config["output_ext"]) analysis_data[full_name] = bundle.get_plot_data(model_data, bench_data, m.setup[case], config) try: el = m.run(config, analysis_data) except KeyError: el = elements.error("Numerics Plots", "Missing data") result = elements.page(case, config['description'], element_list=el) summary[case] = _summarize_result(m, analysis_data, config) _print_summary(m, case, summary[case]) functions.create_page_from_template("numerics.html", os.path.join(livvkit.index_dir, "numerics", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
python
def run_suite(case, config, summary): """ Run the full suite of numerics tests """ m = importlib.import_module(config['module']) m.set_up() config["name"] = case analysis_data = {} bundle = livvkit.numerics_model_module model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs") config["plot_dir"] = plot_dir functions.mkdir_p(plot_dir) model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) for mscale in sorted(model_cases): bscale = bench_cases[mscale] if mscale in bench_cases else [] for mproc in model_cases[mscale]: full_name = '-'.join([mscale, mproc]) bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep)) if mproc in bscale else "") mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep)) model_data = functions.find_file(mpath, "*" + config["output_ext"]) bench_data = functions.find_file(bpath, "*" + config["output_ext"]) analysis_data[full_name] = bundle.get_plot_data(model_data, bench_data, m.setup[case], config) try: el = m.run(config, analysis_data) except KeyError: el = elements.error("Numerics Plots", "Missing data") result = elements.page(case, config['description'], element_list=el) summary[case] = _summarize_result(m, analysis_data, config) _print_summary(m, case, summary[case]) functions.create_page_from_template("numerics.html", os.path.join(livvkit.index_dir, "numerics", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
[ "def", "run_suite", "(", "case", ",", "config", ",", "summary", ")", ":", "m", "=", "importlib", ".", "import_module", "(", "config", "[", "'module'", "]", ")", "m", ".", "set_up", "(", ")", "config", "[", "\"name\"", "]", "=", "case", "analysis_data",...
Run the full suite of numerics tests
[ "Run", "the", "full", "suite", "of", "numerics", "tests" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/numerics.py#L42-L79