docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Run tox. Build package and run unit tests against several pythons. Args: args: Optional arguments passed to tox. Example: fab tox:'-e py36 -r'
def tox(args=''):
    """Run tox: build the package and run unit tests against several pythons.

    Args:
        args: Optional arguments passed through to tox.

    Example:
        fab tox:'-e py36 -r'
    """
    basedir = dirname(__file__)
    latest_pythons = _determine_latest_pythons()
    # e.g. highest_minor_python: '3.6'
    highest_minor_python = _highest_minor(latest_pythons)
    # NOTE: flo() interpolates the local variables above into the command,
    # so their names must not change
    _local_needs_pythons(flo('cd {basedir} && '
                             'python{highest_minor_python} -m tox {args}'))
767,330
Instantiate an instance of the Slack API Args: token: {str} (required) API token, read from SLACK_TOKEN env var auth_test: {bool} verify this token verify: {bool} verify all API calls return with a True 'ok' lazy: {bool} Don't populate properties until called
def __init__(self, token=None, auth_test=False, verify=True, lazy=False):
    """Instantiate a Slack API client.

    Args:
        token: {str} (required) API token, read from SLACK_TOKEN env var
        auth_test: {bool} verify this token against the API
        verify: {bool} verify all API calls return with a True 'ok'
        lazy: {bool} don't populate properties until called
    """
    try:
        self.token = token if token else os.environ['SLACK_TOKEN']
    except KeyError:
        raise ValueError('If not providing a token, must set SLACK_TOKEN envvar')
    if auth_test:
        response = self.auth_test()
        if not response['ok']:
            raise ValueError(
                'Authentication Failed with response: {}'.format(response))
    self.verify = verify
    # Attributes backing properties
    self._channels = []
    self._users = []
    if not lazy:
        # touching the properties forces them to populate eagerly
        _ = self.channels
        _ = self.users
768,476
Low-level method to call the Slack API. Args: method: {str} method name to call params: {dict} GET parameters The token will always be added
def _call_api(self, method, params=None):
    """Low-level method to call the Slack API; the token is always added.

    Args:
        method: {str} method name to call
        params: {dict} GET parameters
    """
    url = self.url.format(method=method)
    # the token is injected into whatever params the caller supplied
    # (note: a caller-provided dict is mutated in place)
    if params:
        params['token'] = self.token
    else:
        params = {'token': self.token}
    logger.debug('Send request to %s', url)
    response = requests.get(url, params=params).json()
    if self.verify:
        if not response['ok']:
            msg = 'For {url} API returned this bad response {response}'
            raise Exception(msg.format(url=url, response=response))
    return response
768,477
Checks the format of the sakefile dictionary to ensure it conforms to specification Args: A dictionary that is the parsed Sakefile (from sake.py) The setting dictionary (for print functions) Returns: True if the Sakefile is conformant False if not
def check_integrity(sakefile, settings):
    """Check that the parsed Sakefile dictionary conforms to specification.

    Args:
        sakefile: dict that is the parsed Sakefile (from sake.py)
        settings: the settings dictionary (for print functions)

    Returns:
        True if the Sakefile is conformant, False if not
    """
    sprint = settings["sprint"]
    error = settings["error"]
    sprint("Call to check_integrity issued", level="verbose")
    if not sakefile:
        error("Sakefile is empty")
        return False
    # checking for duplicate targets
    # NOTE(review): for a plain dict the keys are necessarily unique, so this
    # can only fire for mapping types that permit duplicates -- confirm
    if len(sakefile.keys()) != len(set(sakefile.keys())):
        error("Sakefile contains duplicate targets")
        return False
    for target in sakefile:
        if target == "all":
            if not check_target_integrity(target, sakefile["all"], all=True):
                error("Failed to accept target 'all'")
                return False
            continue
        if "formula" not in sakefile[target]:
            # no formula: this is a meta-target with atomic sub-targets
            if not check_target_integrity(target, sakefile[target], meta=True):
                error("Failed to accept meta-target '{}'".format(target))
                return False
            for atom_target in sakefile[target]:
                if atom_target == "help":
                    continue
                if not check_target_integrity(atom_target,
                                              sakefile[target][atom_target],
                                              parent=target):
                    error("Failed to accept target '{}'\n".format(atom_target))
                    return False
            continue
        if not check_target_integrity(target, sakefile[target]):
            error("Failed to accept target '{}'\n".format(target))
            return False
    return True
768,483
Checks the integrity of a specific target. Gets called multiple times from check_integrity() Args: The target name The dictionary values of that target A boolean representing whether it is a meta-target A boolean representing whether it is the "all" target A string representing name of parent (default None) Returns: True if the target is conformant False if not
def check_target_integrity(key, values, meta=False, all=False, parent=None):
    """Check the integrity of a specific target; called from check_integrity().

    Args:
        key: the target name
        values: the dictionary values of that target
        meta: whether it is a meta-target
        all: whether it is the "all" target (shadows the builtin, but the
             keyword name is part of the public interface used by callers)
        parent: name of the parent target (default None)

    Returns:
        True if the target is conformant, False if not
    """
    # logic to audit "all" target
    if all:
        if not values:
            print("Warning: target 'all' is empty")
        # will check if it has unrecognized target later
        return True
    errmes = "target '{}' is not allowed to be missing a help message\n"
    # logic to audit a meta-target
    if meta:
        # check if help is missing
        if "help" not in values:
            sys.stderr.write(errmes.format(key))
            return False
        # a meta-target holding only its help message is empty
        if len(values.keys()) == 1:
            sys.stderr.write("Meta-target '{}' is empty\n".format(key))
            return False
        return True
    # logic to audit any other target
    expected_fields = {"dependencies", "help", "output", "formula"}
    try:
        our_keys_set = set(values.keys())
    except AttributeError:
        # bug fix: was a bare `except:`; only a non-mapping `values`
        # (no .keys()) should be treated as "not actually a target"
        sys.stderr.write("Error processing target '{}'\n".format(key))
        sys.stderr.write("Are you sure '{}' is a meta-target?\n".format(
            parent))
        sys.stderr.write("If it's not, it's missing a formula\n")
        return False
    ignored_fields = set(field for field in our_keys_set
                         if field.strip().startswith("(ignore)"))
    difference = our_keys_set - expected_fields - ignored_fields
    if difference:
        print("The following fields were not recognized and will be ignored")
        for item in difference:
            print(" - " + item)
    if "help" not in values:
        sys.stderr.write(errmes.format(key))
        return False
    # can't be missing formula either
    if "formula" not in values:
        sys.stderr.write("Target '{}' is missing formula\n".format(key))
        return False
    return True
768,484
Takes sha1 hash of all dependencies and outputs of all targets Args: The graph we are going to build The settings dictionary Returns: A dictionary where the keys are the filenames and the value is the sha1 hash
def take_shas_of_all_files(G, settings):
    """Take sha1 hashes of all dependencies and outputs of all targets.

    Args:
        G: the graph we are going to build
        settings: the settings dictionary

    Returns:
        A dict mapping filenames to {'sha': ...}, or None when the graph
        references no files at all
    """
    global ERROR_FN
    sprint = settings["sprint"]
    error = settings["error"]
    ERROR_FN = error
    sha_dict = {}
    all_files = []
    for target in G.nodes(data=True):
        sprint("About to take shas of files in target '{}'".format(target[0]),
               level="verbose")
        if 'dependencies' in target[1]:
            sprint("It has dependencies", level="verbose")
            # expand glob patterns; a pattern with no matches is kept verbatim
            deplist = []
            for dep in target[1]['dependencies']:
                glist = glob.glob(dep)
                if glist:
                    deplist.extend(glist)
                else:
                    deplist.append(dep)
            target[1]['dependencies'] = list(deplist)
            for dep in target[1]['dependencies']:
                sprint("  - {}".format(dep), level="verbose")
                all_files.append(dep)
        if 'output' in target[1]:
            sprint("It has outputs", level="verbose")
            for out in acts.get_all_outputs(target[1]):
                sprint("  - {}".format(out), level="verbose")
                all_files.append(out)
    if len(all_files):
        sha_dict['files'] = {}
        # check if files exist and de-dupe
        extant_files = []
        for item in all_files:
            if item not in extant_files and os.path.isfile(item):
                extant_files.append(item)
        # hash the surviving files in parallel
        pool = Pool()
        results = pool.map(get_sha, extant_files)
        pool.close()
        pool.join()
        for fn, sha in zip(extant_files, results):
            sha_dict['files'][fn] = {'sha': sha}
        return sha_dict
    sprint("No dependencies", level="verbose")
768,490
Determines if a target needs to run. This can happen in two ways: (a) If a dependency of the target has changed (b) If an output of the target is missing Args: The graph we are going to build The name of the target The dictionary of the current shas held in memory The dictionary of the shas from the shastore The settings dictionary Returns: True if the target needs to be run False if not
def needs_to_run(G, target, in_mem_shas, from_store, settings):
    """Determine whether a target needs to run.

    This can happen in two ways: (a) a dependency of the target has
    changed, or (b) an output of the target is missing.

    Args:
        G: the graph we are going to build
        target: the name of the target
        in_mem_shas: dict of the current shas held in memory
        from_store: dict of the shas from the shastore
        settings: the settings dictionary

    Returns:
        True if the target needs to be run, False if not
    """
    force = settings["force"]
    sprint = settings["sprint"]
    if force:
        sprint("Target rebuild is being forced so {} needs to run".format(
            target), level="verbose")
        return True
    node_dict = get_the_node_dict(G, target)
    if 'output' in node_dict:
        for output in acts.get_all_outputs(node_dict):
            if not os.path.isfile(output):
                sprint("Output file '{}' is missing so it needs to run".format(
                    output), level="verbose")
                return True
    if 'dependencies' not in node_dict:
        # if it has no dependencies, it always needs to run
        sprint("Target {} has no dependencies and needs to run".format(target),
               level="verbose")
        return True
    for dep in node_dict['dependencies']:
        # because the shas are updated after all targets build,
        # it's possible that the dependency's sha doesn't exist
        # in the current "in_mem" dictionary -- then the target must run
        if 'files' not in in_mem_shas or dep not in in_mem_shas['files']:
            sprint("Dep '{}' doesn't exist in memory so it needs to run".format(
                dep), level="verbose")
            return True
        now_sha = in_mem_shas['files'][dep]['sha']
        if 'files' not in from_store or dep not in from_store['files']:
            sprint("Dep '{}' doesn't exist in shastore so it needs to run".format(
                dep), level="verbose")
            return True
        old_sha = from_store['files'][dep]['sha']
        if now_sha != old_sha:
            sprint("There's a mismatch for dep {} so it needs to run".format(
                dep), level="verbose")
            return True
    sprint("Target '{}' doesn't need to run".format(target), level="verbose")
    return False
768,491
Runs the commands supplied as an argument It will exit the program if the commands return a non-zero code Args: the commands to run The settings dictionary
def run_commands(commands, settings):
    """Run the supplied commands, exiting the program on a non-zero code.

    Args:
        commands: the commands to run
        settings: the settings dictionary
    """
    sprint = settings["sprint"]
    quiet = settings["quiet"]
    error = settings["error"]
    enhanced_errors = not settings["no_enhanced_errors"]
    the_shell = settings["shell"] if "shell" in settings else None
    windows_p = sys.platform == "win32"
    # capture output when quiet so errors can still be reported on failure
    STDOUT = PIPE if quiet else None
    STDERR = PIPE if quiet else None
    commands = commands.rstrip()
    sprint("About to run commands '{}'".format(commands), level="verbose")
    if not quiet:
        sprint(commands)
    if the_shell:
        # split the custom shell into executable + args and append the
        # commands (with "-e" for fail-fast on POSIX) as shell arguments
        shell_parts = shlex.split(the_shell)
        the_shell = shell_parts[0]
        shell_args = shell_parts[1:]
        if enhanced_errors and not windows_p:
            shell_args.append("-e")
        shell_args.append(commands)
        commands = shell_args
    else:
        if enhanced_errors and not windows_p:
            commands = ["-e", commands]
    p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,
              executable=the_shell)
    out, err = p.communicate()
    if p.returncode:
        if quiet:
            error(err.decode(locale.getpreferredencoding()))
        error("Command failed to run")
        sys.exit(1)
768,492
Wrapper function that sends to commands in a target's 'formula' to run_commands() Args: The graph we are going to build The target to run The settings dictionary
def run_the_target(G, target, settings):
    """Wrapper: send the commands in a target's 'formula' to run_commands().

    Args:
        G: the graph we are going to build
        target: the target to run
        settings: the settings dictionary
    """
    settings["sprint"]("Running target {}".format(target))
    run_commands(get_the_node_dict(G, target)["formula"], settings)
768,493
This is the master function that performs the building. Args: A graph (often a subgraph) The settings dictionary An optional list of files to not update the shas of (needed when building specific targets) Returns: 0 if successful UN-success results in a fatal error so it will return 0 or nothing
def build_this_graph(G, settings, dont_update_shas_of=None):
    """Master function that performs the building.

    Args:
        G: a graph (often a subgraph)
        settings: the settings dictionary
        dont_update_shas_of: optional list of files whose shas must not be
            updated (needed when building specific targets)

    Returns:
        0 if successful (un-success results in a fatal error, so it will
        return 0 or nothing)
    """
    verbose = settings["verbose"]
    quiet = settings["quiet"]
    force = settings["force"]
    recon = settings["recon"]
    parallel = settings["parallel"]
    error = settings["error"]
    sprint = settings["sprint"]
    if not dont_update_shas_of:
        dont_update_shas_of = []
    sprint("Checking that graph is directed acyclic", level="verbose")
    if not nx.is_directed_acyclic_graph(G):
        error("Dependency resolution is impossible; "
              "graph is not directed and acyclic"
              "\nCheck the Sakefile\n")
        sys.exit(1)
    sprint("Dependency resolution is possible", level="verbose")
    in_mem_shas = take_shas_of_all_files(G, settings)
    from_store = {}
    if not os.path.isfile(".shastore"):
        # no store yet: seed it from the in-memory shas
        write_shas_to_shastore(in_mem_shas)
        in_mem_shas = {'files': {}}
    with io.open(".shastore", "r") as fh:
        shas_on_disk = fh.read()
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input and deprecated in newer PyYAML -- confirm
    from_store = yaml.load(shas_on_disk)
    check_shastore_version(from_store, settings)
    if not from_store:
        # store on disk was empty/stale: rewrite and re-read it
        write_shas_to_shastore(in_mem_shas)
        in_mem_shas = {'files': {}}
        with io.open(".shastore", "r") as fh:
            shas_on_disk = fh.read()
        from_store = yaml.load(shas_on_disk)
    if parallel:
        for line in parallel_sort(G):
            line = sorted(line)
            sprint("Checking if targets '{}' need to be run".format(
                ", ".join(line)), level="verbose")
            to_build = [item for item in line
                        if needs_to_run(G, item, in_mem_shas, from_store,
                                        settings)]
            if to_build:
                if recon:
                    if len(to_build) == 1:
                        sprint("Would run target '{}'".format(to_build[0]))
                    else:
                        sprint("Would run targets '{}' in parallel".format(
                            ", ".join(to_build)))
                    continue
                parallel_run_these(G, to_build, in_mem_shas, from_store,
                                   settings, dont_update_shas_of)
    else:
        # still have to use parallel_sort to make
        # build order deterministic (by sorting targets)
        targets = []
        for line in parallel_sort(G):
            targets.extend(sorted(line))
        for target in targets:
            sprint("Checking if target '{}' needs to be run".format(target),
                   level="verbose")
            if needs_to_run(G, target, in_mem_shas, from_store, settings):
                if recon:
                    sprint("Would run target: {}".format(target))
                    continue
                run_the_target(G, target, settings)
                node_dict = get_the_node_dict(G, target)
                # update shas of this target's outputs/deps as we go
                if "output" in node_dict:
                    for output in acts.get_all_outputs(node_dict):
                        if output not in dont_update_shas_of:
                            in_mem_shas['files'][output] = {
                                "sha": get_sha(output, settings)}
                            write_shas_to_shastore(in_mem_shas)
                if "dependencies" in node_dict:
                    for dep in acts.get_all_dependencies(node_dict):
                        if dep not in dont_update_shas_of:
                            in_mem_shas['files'][dep] = {
                                "sha": get_sha(dep, settings)}
                            write_shas_to_shastore(in_mem_shas)
    if recon:
        return 0
    in_mem_shas = take_shas_of_all_files(G, settings)
    if in_mem_shas:
        in_mem_shas = merge_from_store_and_in_mems(from_store, in_mem_shas,
                                                   dont_update_shas_of)
        write_shas_to_shastore(in_mem_shas)
    sprint("Done", color=True)
    return 0
768,501
Returns the prettily formatted help strings (for printing) Args: A dictionary that is the parsed Sakefile (from sake.py) NOTE: the list sorting in this function is required for this function to be deterministic
def get_help(sakefile):
    """Return the prettily formatted help strings (for printing).

    Args:
        sakefile: dict that is the parsed Sakefile (from sake.py)

    NOTE: the list sorting in this function is required for the output
    to be deterministic
    """
    full_string = "You can 'sake' one of the following...\n\n"
    outerlines = []
    for target in sakefile:
        # the "all" target doesn't carry a help message
        if target == "all":
            continue
        middle_lines = []
        if "formula" not in sakefile[target]:
            # meta-target: list it, then its sorted sub-targets
            innerstr = "{}:\n - {}\n\n".format(escp(target),
                                               sakefile[target]["help"])
            inner = []
            for atom_target in sakefile[target]:
                if atom_target == "help":
                    continue
                inner.append("  {}:\n - {}\n\n".format(
                    escp(atom_target),
                    sakefile[target][atom_target]["help"]))
            if inner:
                innerstr += '\n'.join(sorted(inner))
            middle_lines.append(innerstr)
        else:
            middle_lines.append("{}:\n - {}\n\n".format(
                escp(target), sakefile[target]["help"]))
        if middle_lines:
            outerlines.append('\n'.join(sorted(middle_lines)))
    if outerlines:
        full_string += '\n'.join(sorted(outerlines))
    what_clean_does = "remove all targets' outputs and start from scratch"
    full_string += "\nclean:\n - {}\n\n".format(what_clean_does)
    what_visual_does = "output visual representation of project's dependencies"
    full_string += "visual:\n - {}\n".format(what_visual_does)
    # collapse runs of 3+ newlines introduced by the joins above
    full_string = re.sub("\n{3,}", "\n\n", full_string)
    return full_string
768,517
Function to help construct_graph() identify dependencies Args: A dependency A flag indicating verbosity A (populated) NetworkX DiGraph Returns: A list of targets that build given dependency
def check_for_dep_in_outputs(dep, verbose, G):
    """Help construct_graph() identify which targets build a dependency.

    Args:
        dep: a dependency (matched against outputs with fnmatch)
        verbose: a flag indicating verbosity
        G: a (populated) NetworkX DiGraph

    Returns:
        A list of targets whose outputs match the given dependency
    """
    if verbose:
        print("checking dep {}".format(dep))
    ret_list = []
    for name, data in G.nodes(data=True):
        if "output" not in data:
            continue
        # a single fnmatch hit is enough to record this target
        if any(fnmatch.fnmatch(out, dep) for out in data['output']):
            ret_list.append(name)
    return ret_list
768,520
Takes the sakefile dictionary and builds a NetworkX graph Args: A dictionary that is the parsed Sakefile (from sake.py) The settings dictionary Returns: A NetworkX graph
def construct_graph(sakefile, settings):
    """Take the sakefile dictionary and build a NetworkX graph.

    Args:
        sakefile: dict that is the parsed Sakefile (from sake.py)
        settings: the settings dictionary

    Returns:
        A NetworkX DiGraph
    """
    verbose = settings["verbose"]
    sprint = settings["sprint"]
    G = nx.DiGraph()
    sprint("Going to construct Graph", level="verbose")
    # add a node for every atomic target (expanding meta-targets)
    for target in sakefile:
        if target == "all":
            # we don't want this node
            continue
        if "formula" not in sakefile[target]:
            # that means this is a meta target
            for atomtarget in sakefile[target]:
                if atomtarget == "help":
                    continue
                sprint("Adding '{}'".format(atomtarget), level="verbose")
                data_dict = sakefile[target][atomtarget]
                data_dict["parent"] = target
                G.add_node(atomtarget, **data_dict)
        else:
            sprint("Adding '{}'".format(target), level="verbose")
            G.add_node(target, **sakefile[target])
    sprint("Nodes are built\nBuilding connections", level="verbose")
    for node in G.nodes(data=True):
        sprint("checking node {} for dependencies".format(node[0]),
               level="verbose")
        # replace null fields with empty lists, then normalize output paths
        for k, v in node[1].items():
            if v is None:
                node[1][k] = []
        if "output" in node[1]:
            for index, out in enumerate(node[1]['output']):
                node[1]['output'][index] = clean_path(node[1]['output'][index])
        if "dependencies" not in node[1]:
            continue
        sprint("it has dependencies", level="verbose")
        # normalize all paths in dependencies
        for index, dep in enumerate(node[1]['dependencies']):
            node[1]['dependencies'][index] = clean_path(
                node[1]['dependencies'][index])
    # second pass: add an edge builder -> consumer for each resolved dep
    for node in G.nodes(data=True):
        if "dependencies" not in node[1]:
            continue
        connects = []
        for dep in node[1]['dependencies']:
            matches = check_for_dep_in_outputs(dep, verbose, G)
            if not matches:
                continue
            for match in matches:
                sprint("Appending {} to matches".format(match),
                       level="verbose")
                connects.append(match)
        for connect in connects:
            G.add_edge(connect, node[0])
    return G
768,525
Removes all the output files from all targets. Takes the graph as the only argument Args: The networkx graph object The settings dictionary Returns: 0 if successful 1 if removing even one file failed
def clean_all(G, settings):
    """Remove all output files from all targets (plus the .shastore).

    Args:
        G: the networkx graph object
        settings: the settings dictionary

    Returns:
        0 if successful, 1 if removing even one file failed
    """
    quiet = settings["quiet"]
    recon = settings["recon"]
    sprint = settings["sprint"]
    error = settings["error"]
    all_outputs = []
    for node in G.nodes(data=True):
        if "output" in node[1]:
            for item in get_all_outputs(node[1]):
                all_outputs.append(item)
    all_outputs.append(".shastore")
    retcode = 0
    for item in sorted(all_outputs):
        if os.path.isfile(item):
            if recon:
                sprint("Would remove file: {}".format(item))
                continue
            # bug fix: the filename was never interpolated into this message
            sprint("Attempting to remove file '{}'".format(item),
                   level="verbose")
            try:
                os.remove(item)
                sprint("Removed file", level="verbose")
            except OSError:
                # bug fix: was a bare `except:`; only OS-level removal
                # failures should be reported here
                error("Error: file '{}' failed to be removed".format(item))
                retcode = 1
    if not retcode and not recon:
        sprint("All clean", color=True)
    return retcode
768,528
Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files
def write_dot_file(G, filename):
    """Write the graph G in dot file format for graphviz visualization.

    Args:
        G: a NetworkX graph
        filename: a filename to name the dot file
    """
    with io.open(filename, "w") as fh:
        fh.write("strict digraph DependencyDiagram {\n")
        edge_list = G.edges()
        node_list = set(G.nodes())
        if edge_list:
            for edge in sorted(edge_list):
                source, targ = edge
                # bug fix: subtract the node names themselves; the old
                # code used set(source), which splits a name into its
                # characters and left linked nodes in node_list
                node_list = node_list - {source, targ}
                line = '"{}" -> "{}";\n'
                fh.write(line.format(source, targ))
        # draw nodes with no links
        if node_list:
            for node in sorted(node_list):
                line = '"{}"\n'.format(node)
                fh.write(line)
        fh.write("}")
768,529
Uses networkX to draw a graphviz dot file either (a) calls the graphviz command "dot" to turn it into a SVG and remove the dotfile (default), or (b) if no_graphviz is True, just output the graphviz dot file Args: a NetworkX DiGraph the settings dictionary a filename (a default is provided) a flag indicating whether graphviz should *not* be called Returns: 0 if everything worked will cause fatal error on failure
def visualize(G, settings, filename="dependencies", no_graphviz=False):
    """Draw a graphviz visualization of the dependency graph.

    Either (a) calls the graphviz command "dot" to turn a temporary dot
    file into an image and removes the dot file (default), or (b) if
    no_graphviz is True, just outputs the graphviz dot file.

    Args:
        G: a NetworkX DiGraph
        settings: the settings dictionary
        filename: a filename (a default is provided)
        no_graphviz: a flag indicating whether graphviz should *not* be
            called

    Returns:
        0 if everything worked; will cause a fatal error on failure
    """
    error = settings["error"]
    if no_graphviz:
        write_dot_file(G, filename)
        return 0
    write_dot_file(G, "tempdot")
    # pick the dot output format from the filename extension
    renderer = None
    for pattern, fmt in ((r"\.jpe?g$", "jpg"), (r"\.svg$", "svg"),
                         (r"\.png$", "png"), (r"\.gif$", "gif"),
                         (r"\.ps$", "ps"), (r"\.pdf$", "pdf")):
        if re.search(pattern, filename, re.IGNORECASE):
            renderer = fmt
            break
    if renderer is None:
        # unrecognized extension: default to svg and name the file so
        renderer = "svg"
        filename += ".svg"
    command = "dot -T{} tempdot -o {}".format(renderer, filename)
    p = Popen(command, shell=True)
    p.communicate()
    if p.returncode:
        os.remove("tempdot")
        error("Either graphviz is not installed, or its not on PATH")
        sys.exit(1)
    os.remove("tempdot")
    return 0
768,530
High-level function for creating messages. Return packed bytes. Args: text: {str} channel: {str} Either name or ID
def make_message(self, text, channel):
    """High-level helper for creating messages; returns packed bytes.

    Args:
        text: {str} message text
        channel: {str} either a channel name or ID
    """
    try:
        # prefer resolving the channel as a name...
        channel_id = self.slack.channel_from_name(channel)['id']
    except ValueError:
        # ...and fall back to treating it as an ID already
        channel_id = channel
    payload = {
        'text': text,
        'type': 'message',
        'channel': channel_id,
        'id': self.message_id,
    }
    return pack(payload)
768,541
This function returns the fields for a schema that matches the provided nautilus model. Args: model (nautilus.model.BaseModel): The model to base the field list on Returns: (dict<field_name: str, graphqlType>): A mapping of field names to graphql types
def fields_for_model(model):
    """Return the schema fields that match the provided nautilus model.

    Args:
        model (nautilus.model.BaseModel): The model to base the field
            list on

    Returns:
        (dict<field_name: str, graphqlType>): A mapping of field names
        to graphql types
    """
    # the attribute arguments (no filters), keyed by lowercased field name
    args = {}
    for field in model.fields():
        args[field.name.lower()] = convert_peewee_field(field)
    # use the field arguments, without the segments
    return args
768,964
This factory returns an action handler that creates a new instance of the specified model when a create action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to create when the action is received. Returns: function(action_type, payload): The action handler for this model
def create_handler(Model, name=None, **kwds):
    """Factory for an action handler that creates new `Model` instances.

    The returned handler reacts to the conventional nautilus create
    action for the given model.

    Args:
        Model (nautilus.BaseModel): The model to create when the action
            is received.

    Returns:
        function(action_type, payload): The action handler for this model
    """
    async def action_handler(service, action_type, payload, props,
                             notify=True, **kwds):
        # bail out unless the payload represents a new instance of `Model`
        if action_type != get_crud_action('create', name or Model):
            return
        try:
            # the props of the reply message
            message_props = {}
            # propagate any correlation id from the request into the reply
            if 'correlation_id' in props:
                message_props['correlation_id'] = props['correlation_id']
            # ensure every required (non-id) field is present
            # TODO: check all required fields rather than failing on the first
            for requirement in Model.required_fields():
                field_name = requirement.name
                if not field_name in payload and field_name != 'id':
                    # yell loudly
                    raise ValueError(
                        "Required field not found in payload: %s" %field_name
                    )
            # create and persist the new model instance
            new_model = Model(**payload)
            new_model.save()
            # if we need to tell someone about what happened
            if notify:
                # publish the success event
                await service.event_broker.send(
                    payload=ModelSerializer().serialize(new_model),
                    action_type=change_action_status(action_type,
                                                     success_status()),
                    **message_props
                )
        except Exception as err:
            if notify:
                # publish the error as an event
                await service.event_broker.send(
                    payload=str(err),
                    action_type=change_action_status(action_type,
                                                     error_status()),
                    **message_props
                )
            else:
                # not notifying: raise the exception normally
                raise err
    # return the handler
    return action_handler
768,966
Creates a comment block Args: text (str): content of comment without # comment_prefix (str): character indicating start of comment Returns: self for chaining
def comment(self, text, comment_prefix='#'):
    """Create a comment block.

    Args:
        text (str): content of comment without #
        comment_prefix (str): character indicating start of comment

    Returns:
        self for chaining
    """
    block = Comment(self._container)
    # normalize: ensure the prefix and a trailing newline are present
    if not text.startswith(comment_prefix):
        text = comment_prefix + " " + text
    if not text.endswith('\n'):
        text = text + '\n'
    block.add_line(text)
    self._container.structure.insert(self._idx, block)
    self._idx += 1
    return self
768,978
Creates a section block Args: section (str or :class:`Section`): name of section or object Returns: self for chaining
def section(self, section):
    """Create a section block.

    Args:
        section (str or :class:`Section`): name of section or object

    Returns:
        self for chaining
    """
    if not isinstance(self._container, ConfigUpdater):
        raise ValueError("Sections can only be added at section level!")
    if isinstance(section, str):
        # create a new section from the given name
        section = Section(section, container=self._container)
    elif not isinstance(section, Section):
        raise ValueError("Parameter must be a string or Section type!")
    existing_names = [block.name for block in self._container
                      if isinstance(block, Section)]
    if section.name in existing_names:
        raise DuplicateSectionError(section.name)
    self._container.structure.insert(self._idx, section)
    self._idx += 1
    return self
768,979
Creates a vertical space of newlines Args: newlines (int): number of empty lines Returns: self for chaining
def space(self, newlines=1):
    """Create a vertical space of newlines.

    Args:
        newlines (int): number of empty lines

    Returns:
        self for chaining
    """
    space = Space()
    for _ in range(newlines):
        space.add_line('\n')
    self._container.structure.insert(self._idx, space)
    self._idx += 1
    return self
768,980
Creates a new option inside a section Args: key (str): key of the option value (str or None): value of the option **kwargs: are passed to the constructor of :class:`Option` Returns: self for chaining
def option(self, key, value=None, **kwargs):
    """Create a new option inside a section.

    Args:
        key (str): key of the option
        value (str or None): value of the option
        **kwargs: are passed to the constructor of :class:`Option`

    Returns:
        self for chaining
    """
    if not isinstance(self._container, Section):
        raise ValueError("Options can only be added inside a section!")
    option = Option(key, value, container=self._container, **kwargs)
    # NOTE(review): value is passed to the constructor and assigned again
    # here -- presumably the property setter does extra work; confirm
    # before simplifying
    option.value = value
    self._container.structure.insert(self._idx, option)
    self._idx += 1
    return self
768,981
Add a Comment object to the section Used during initial parsing mainly Args: line (str): one line in the comment
def add_comment(self, line):
    """Add a Comment object to the section (used during initial parsing).

    Args:
        line (str): one line in the comment
    """
    # start a new Comment block unless the previous block already is one
    if not isinstance(self.last_item, Comment):
        self._structure.append(Comment(self._structure))
    self.last_item.add_line(line)
    return self
768,983
Add a Space object to the section Used during initial parsing mainly Args: line (str): one line that defines the space, maybe whitespaces
def add_space(self, line):
    """Add a Space object to the section (used during initial parsing).

    Args:
        line (str): one line that defines the space, maybe whitespaces
    """
    # start a new Space block unless the previous block already is one
    if not isinstance(self.last_item, Space):
        self._structure.append(Space(self._structure))
    self.last_item.add_line(line)
    return self
768,984
Set an option for chaining. Args: option (str): option name value (str): value, default None
def set(self, option, value=None):
    """Set an option for chaining.

    Args:
        option (str): option name
        value (str): value, default None

    Returns:
        self for chaining
    """
    option = self._container.optionxform(option)
    # update in place when the option exists, otherwise create it
    if option in self.options():
        self[option].value = value
    else:
        self[option] = value
    return self
768,992
Sets the value to a given list of options, e.g. multi-line values Args: values (list): list of values separator (str): separator for values, default: line separator indent (str): indentation depth in case of line separator
def set_values(self, values, separator='\n', indent=4*' '):
    """Set the value to a given list of options, e.g. multi-line values.

    Args:
        values (list): list of values
        separator (str): separator for values, default: line separator
        indent (str): indentation depth in case of line separator
    """
    self._updated = True
    self._multiline_value_joined = True
    # bug fix: work on a copy so the caller's list is not mutated by the
    # insert below (the old code prepended '' into the caller's list)
    values = list(values)
    self._values = values
    if separator == '\n':
        # a leading empty entry yields a newline before the first value
        values.insert(0, '')
        separator = separator + indent
    self._value = separator.join(values)
768,999
Read and parse a filename. Args: filename (str): path to file encoding (str): encoding of file, default None
def read(self, filename, encoding=None):
    """Read and parse a filename.

    Args:
        filename (str): path to file
        encoding (str): encoding of file, default None
    """
    with open(filename, encoding=encoding) as fh:
        self._read(fh, filename)
    # remember where the content came from for later writes
    self._filename = os.path.abspath(filename)
769,002
Call ConfigParser to validate config Args: kwargs: are passed to :class:`configparser.ConfigParser`
def validate_format(self, **kwargs):
    """Call ConfigParser to validate the current config content.

    Args:
        kwargs: are passed to :class:`configparser.ConfigParser`
    """
    args = dict(
        dict_type=self._dict,
        allow_no_value=self._allow_no_value,
        inline_comment_prefixes=self._inline_comment_prefixes,
        strict=self._strict,
        empty_lines_in_values=self._empty_lines_in_values,
    )
    args.update(kwargs)
    parser = ConfigParser(**args)
    # round-trip through a string; read_string raises on malformed content
    parser.read_string(str(self))
769,009
Create a new section in the configuration. Raise DuplicateSectionError if a section by the specified name already exists. Raise ValueError if name is DEFAULT. Args: section (str or :class:`Section`): name or Section type
def add_section(self, section):
    """Create a new section in the configuration.

    Raises DuplicateSectionError if a section by the specified name
    already exists.

    Args:
        section (str or :class:`Section`): name or Section type
    """
    # NOTE(review): when a Section object is passed, this membership test
    # compares it against the section *names* -- confirm Section equality
    # handles that case
    if section in self.sections():
        raise DuplicateSectionError(section)
    if isinstance(section, str):
        # create a new section from the given name
        section = Section(section, container=self)
    elif not isinstance(section, Section):
        raise ValueError("Parameter must be a string or Section type!")
    self._structure.append(section)
769,013
Returns list of configuration options for the named section. Args: section (str): name of section Returns: list: list of option names
def options(self, section):
    """Return a list of option names for the named section.

    Args:
        section (str): name of section

    Returns:
        list: list of option names

    Raises:
        NoSectionError: if the section does not exist
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    return self[section].options()
769,014
Gets an option value for a given section. Args: section (str): section name option (str): option name Returns: :class:`Option`: Option object holding key/value pair
def get(self, section, option):
    """Get an option value for a given section.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        :class:`Option`: Option object holding key/value pair

    Raises:
        NoSectionError / NoOptionError for a missing section / option
    """
    if not self.has_section(section):
        raise NoSectionError(section) from None
    section_obj = self[section]
    key = self.optionxform(option)
    try:
        return section_obj[key]
    except KeyError:
        # NOTE(review): passes the Section object (not its name) to
        # NoOptionError, matching the original -- confirm intended
        raise NoOptionError(option, section_obj)
769,015
Return a list of (name, value) tuples for options or sections. If section is given, return a list of tuples with (name, value) for each option in the section. Otherwise, return a list of tuples with (section_name, section_type) for each section. Args: section (str): optional section name, default UNSET Returns: list: list of :class:`Section` or :class:`Option` objects
def items(self, section=_UNSET):
    """Return a list of (name, value) tuples for options or sections.

    If section is given, return (option_name, Option) tuples for each
    option in that section; otherwise return (section_name, Section)
    tuples for each section.

    Args:
        section (str): optional section name, default UNSET

    Returns:
        list: list of :class:`Section` or :class:`Option` tuples
    """
    if section is _UNSET:
        return [(sect.name, sect) for sect in self.sections_blocks()]
    sect = self[section]
    return [(opt.key, opt) for opt in sect.option_blocks()]
769,016
Checks for the existence of a given option in a given section. Args: section (str): name of section option (str): name of option Returns: bool: whether the option exists in the given section
def has_option(self, section, option):
    """Check for the existence of a given option in a given section.

    Args:
        section (str): name of section
        option (str): name of option

    Returns:
        bool: whether the option exists in the given section
    """
    # a missing section simply means the option cannot exist
    if section not in self.sections():
        return False
    return self.optionxform(option) in self[section]
769,017
Set an option. Args: section (str): section name option (str): option name value (str): value, default None
def set(self, section, option, value=None):
    """Set an option.

    Args:
        section (str): section name
        option (str): option name
        value (str): value, default None

    Returns:
        self for chaining

    Raises:
        NoSectionError: if the section does not exist
    """
    try:
        sect = self[section]
    except KeyError:
        raise NoSectionError(section) from None
    key = self.optionxform(option)
    # update in place when the option exists, otherwise create it
    if key in sect:
        sect[key].value = value
    else:
        sect[key] = value
    return self
769,018
Remove an option. Args: section (str): section name option (str): option name Returns: bool: whether the option was actually removed
def remove_option(self, section, option):
    """Remove an option.

    Args:
        section (str): section name
        option (str): option name

    Returns:
        bool: whether the option was actually removed

    Raises:
        NoSectionError: if the section does not exist
    """
    try:
        sect = self[section]
    except KeyError:
        raise NoSectionError(section) from None
    key = self.optionxform(option)
    existed = key in sect.options()
    if existed:
        del sect[key]
    return existed
769,019
Remove a file section. Args: name: name of the section Returns: bool: whether the section was actually removed
def remove_section(self, name):
    """Remove a file section.

    Args:
        name: name of the section

    Returns:
        bool: whether the section was actually removed
    """
    if not self.has_section(name):
        return False
    # locate the section's block index and drop it from the structure
    del self._structure[self._get_section_idx(name)]
    return True
769,020
This function renders the template designated by the argument to the designated directory using the given context. Args: template (string) : the source template to use (relative to ./templates) out_dir (string) : the name of the output directory context (dict) : the template rendering context
def render_template(template, out_dir='.', context=None):
    """Render the named template directory into *out_dir* with *context*.

    Walks ``../templates/<template>`` relative to this file, renders every
    file's contents AND its path with Jinja, and recreates empty
    directories found in the template tree.

    Args:
        template (string): the source template to use (relative to ./templates)
        out_dir (string): the name of the output directory
        context (dict): the template rendering context; None means empty
    """
    # Bug fix: normalize the context exactly once. The original only
    # defaulted it when rendering file *contents*; rendering file and
    # directory *names* used **context raw and raised TypeError for None.
    context = context or {}

    # the directory containing templates
    template_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)),
        '..',
        'templates',
        template
    )

    # collect files to render and directories that contain no files
    files = []
    empty_dirs = []
    for (dirpath, _, filenames) in os.walk(template_directory):
        if len(filenames) == 0:
            # keep the directory path relative to the template root
            empty_dirs.append(os.path.relpath(dirpath, template_directory))
        else:
            files.extend(os.path.join(dirpath, filepath) for filepath in filenames)

    # render every template file into its (also rendered) target path
    for source_file in files:
        with open(source_file, 'r') as file:
            template_rendered = Template(file.read()).render(**context)

        # the location of the source relative to the template directory
        source_relpath = os.path.relpath(source_file, template_directory)
        # the target path is itself a Jinja template (e.g. "{{name}}/x.py")
        filename_rendered = Template(os.path.join(out_dir, source_relpath)).render(**context)

        # make sure the target directory exists
        source_dir = os.path.dirname(filename_rendered)
        if not os.path.exists(source_dir):
            os.makedirs(source_dir)

        with open(filename_rendered, 'w') as target_file:
            target_file.write(template_rendered)

    # recreate directories that were empty in the template tree
    for dirpath in empty_dirs:
        dirname_rendered = Template(os.path.join(out_dir, dirpath)).render(**context)
        try:
            if not os.path.exists(dirname_rendered):
                os.makedirs(dirname_rendered)
        except OSError as exc:
            # Bug fix: tolerate a concurrent-creation race on the *target*
            # directory. The original tested the source dirpath here, so a
            # genuine EEXIST on the target was re-raised incorrectly.
            if not (exc.errno == errno.EEXIST and os.path.isdir(dirname_rendered)):
                raise
769,024
This factory returns an action handler that deletes an instance of the specified model when a delete action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to delete when the action is received. Returns: function(type, payload): The action handler for this model
def delete_handler(Model, name=None, **kwds):
    """Build an async action handler that deletes a record of `Model`.

    The returned coroutine reacts only to the CRUD 'delete' action for
    `name or Model`; on success it publishes an 'ok' status event, on
    failure it publishes the error (or re-raises when notify=False).

    Args:
        Model (nautilus.BaseModel): The model to delete when the action is received.
        name (str): optional action-name override for the model.

    Returns:
        function(service, action_type, payload, props): the action handler
    """
    # necessary imports
    # NOTE(review): `db` is imported but never used below -- confirm it can
    # be removed without side effects of the import itself.
    from nautilus.database import db

    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # only react to the delete action addressed to this model
        if action_type == get_crud_action('delete', name or Model):
            try:
                # the props of the message
                message_props = {}
                # if there was a correlation id in the request
                if 'correlation_id' in props:
                    # make sure it ends up in the reply
                    message_props['correlation_id'] = props['correlation_id']
                # the id in the payload representing the record to delete
                record_id = payload['id'] if 'id' in payload else payload['pk']
                # get the model matching the payload
                try:
                    model_query = Model.select().where(Model.primary_key() == record_id)
                except KeyError:
                    raise RuntimeError("Could not find appropriate id to remove service record.")
                # remove the model instance (raises if no row matched)
                model_query.get().delete_instance()
                # if we need to tell someone about what happened
                if notify:
                    # publish the success event
                    await service.event_broker.send(
                        payload='{"status":"ok"}',
                        action_type=change_action_status(action_type, success_status()),
                        **message_props
                    )
            # if something goes wrong
            except Exception as err:
                # if we need to tell someone about what happened
                if notify:
                    # publish the error as an event
                    await service.event_broker.send(
                        payload=str(err),
                        action_type=change_action_status(action_type, error_status()),
                        **message_props
                    )
                # otherwise we aren't supposed to notify
                else:
                    # raise the exception normally
                    raise err
    # return the handler
    return action_handler
769,029
This factory returns an action handler that responds to read requests by resolving the payload as a graphql query against the internal schema. Args: Model (nautilus.BaseModel): The model to delete when the action received. Returns: function(type, payload): The action handler for this model
def read_handler(Model, name=None, **kwds):
    """Build an async action handler that answers read requests.

    The returned coroutine reacts to the CRUD 'read' action for
    `name or Model` by executing the payload as a GraphQL query against
    the service's schema and publishing the JSON-encoded result (or the
    error) back over the event broker.

    Args:
        Model (nautilus.BaseModel): The model this handler serves reads for.
        name (str): optional action-name override for the model.

    Returns:
        function(service, action_type, payload, props): the action handler
    """
    async def action_handler(service, action_type, payload, props, **kwds):
        # only react to the read action addressed to this model
        if action_type == get_crud_action('read', name or Model):
            # the props of the message
            message_props = {}
            # if there was a correlation id in the request
            if 'correlation_id' in props:
                # make sure it ends up in the reply
                message_props['correlation_id'] = props['correlation_id']
            try:
                # resolve the query using the service schema
                resolved = service.schema.execute(payload)
                # create the string response
                response = json.dumps({
                    'data': {key:value for key,value in resolved.data.items()},
                    'errors': resolved.errors
                })
                # publish the success event
                await service.event_broker.send(
                    payload=response,
                    action_type=change_action_status(action_type, success_status()),
                    **message_props
                )
            # if something goes wrong
            except Exception as err:
                # publish the error as an event
                await service.event_broker.send(
                    payload=str(err),
                    action_type=change_action_status(action_type, error_status()),
                    **message_props
                )
    # return the handler
    return action_handler
769,030
This action handler factory returns an action handler that responds to actions with CRUD types (following nautilus conventions) and performs the necessary mutation on the model's database. Args: Model (nautilus.BaseModel): The model to mutate when the action is received. Returns: function(type, payload): The action handler for this model
def crud_handler(Model, name=None, **kwds):
    """Combine the four CRUD action handlers for *Model* into one.

    Args:
        Model (nautilus.BaseModel): The model the combined handler serves.
        name (str): optional action-name override for the model.

    Returns:
        function(type, payload): a handler that dispatches to the
        create/read/update/delete handlers in turn.
    """
    # import the necessary modules
    from nautilus.network.events import combine_action_handlers
    from . import update_handler, create_handler, delete_handler, read_handler

    # instantiate one handler per CRUD verb and merge them
    factories = (create_handler, read_handler, update_handler, delete_handler)
    return combine_action_handlers(
        *[factory(Model, name=name) for factory in factories]
    )
769,031
This factory returns an action handler that updates an instance of the specified model when an update action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to update when the action is received. Returns: function(type, payload): The action handler for this model
def update_handler(Model, name=None, **kwds):
    """Build an async action handler that updates a record of `Model`.

    The returned coroutine reacts to the CRUD 'update' action for
    `name or Model`: it looks the record up by its primary key (which must
    be present in the payload), applies the remaining payload fields as
    attribute updates, saves, and publishes the serialized result (or the
    error, or re-raises when notify=False).

    Args:
        Model (nautilus.BaseModel): The model to update when the action is received.
        name (str): optional action-name override for the model.

    Returns:
        function(service, action_type, payload, props): the action handler
    """
    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # only react to the update action addressed to this model
        if action_type == get_crud_action('update', name or Model):
            try:
                # the props of the message
                message_props = {}
                # if there was a correlation id in the request
                if 'correlation_id' in props:
                    # make sure it ends up in the reply
                    message_props['correlation_id'] = props['correlation_id']
                # grab the name of the primary key for the model
                pk_field = Model.primary_key()
                # make sure there is a primary key to id the model
                if not pk_field.name in payload:
                    # yell loudly
                    raise ValueError("Must specify the pk of the model when updating")
                # grab the matching model
                model = Model.select().where(pk_field == payload[pk_field.name]).get()
                # remove the key from the payload so it is not treated as an update
                payload.pop(pk_field.name, None)
                # for every key,value pair
                for key, value in payload.items():
                    # TODO: add protection for certain fields from being
                    # changed by the api
                    setattr(model, key, value)
                # save the updates
                model.save()
                # if we need to tell someone about what happened
                if notify:
                    # publish the success event
                    await service.event_broker.send(
                        payload=ModelSerializer().serialize(model),
                        action_type=change_action_status(action_type, success_status()),
                        **message_props
                    )
            # if something goes wrong
            except Exception as err:
                # if we need to tell someone about what happened
                if notify:
                    # publish the error as an event
                    await service.event_broker.send(
                        payload=str(err),
                        action_type=change_action_status(action_type, error_status()),
                        **message_props
                    )
                # otherwise we aren't supposed to notify
                else:
                    # raise the exception normally
                    raise err
    # return the handler
    return action_handler
769,047
This function starts the service's network intefaces. Args: port (int): The port for the http server.
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
    """Start the service's network interfaces and block until interrupted.

    Starts the event broker (if any), announces the service, serves HTTP
    on (host, port) via asyncio, and runs the event loop forever. Ctrl+C
    triggers cleanup; the loop is always closed on exit.

    Args:
        host (str): interface to bind the http server to.
        port (int): The port for the http server.
        shutdown_timeout (float): accepted for API compatibility; not read here.
    """
    # NOTE(review): the banner always prints "localhost" even when a
    # different host was passed -- confirm whether that is intentional.
    print("Running service on http://localhost:%i. " % port + \
          "Press Ctrl+C to terminate.")

    # apply the configuration to the service config
    self.config.port = port
    self.config.host = host

    # start the loop
    try:
        # if an event broker has been created for this service
        if self.event_broker:
            # start the broker
            self.event_broker.start()
            # announce the service
            self.loop.run_until_complete(self.announce())

        # the handler for the http server
        http_handler = self.app.make_handler()
        # create an asyncio server
        self._http_server = self.loop.create_server(http_handler, host, port)
        # grab the handler for the server callback
        self._server_handler = self.loop.run_until_complete(self._http_server)
        # start the event loop
        self.loop.run_forever()
    # if the user interrupted the server
    except KeyboardInterrupt:
        # keep going
        pass
    # when we're done
    finally:
        try:
            # clean up the service
            self.cleanup()
        # if we end up closing before any variables get assigned
        except UnboundLocalError:
            # just ignore it (there was nothing to close)
            pass

        # close the event loop
        self.loop.close()
769,073
This method provides a programmatic way of adding individual routes to the http server. Args: url (str): the url to be handled by the request_handler request_handler (nautilus.network.RequestHandler): The request handler
def add_http_endpoint(self, url, request_handler):
    """Register *request_handler* for every HTTP method on *url*.

    Args:
        url (str): the url to be handled by the request_handler
        request_handler (nautilus.network.RequestHandler): The request handler
    """
    # '*' wires the handler to all HTTP verbs for this route
    router = self.app.router
    router.add_route('*', url, request_handler)
769,075
This function is used to provide a sessionToken for later requests. Args: password (str): the password to register for the new user. **kwds: additional user fields forwarded to the remote user service.
async def register_user(self, password, **kwds):
    """Create a user remotely, store its password locally, return a session token.

    Args:
        password (str): the password to register for the new user.
        **kwds: additional fields forwarded to the remote user service.

    Returns:
        dict: {'user': <remote user summary>, 'sessionToken': <token>}

    Raises:
        RuntimeError: if a password entry for this user already exists.
    """
    # create the user in the remote user service
    user = await self._create_remote_user(password=password, **kwds)
    # if there is no pk field
    if not 'pk' in user:
        # make sure the user has a pk field (mirror of 'id')
        user['pk'] = user['id']

    # the query to find a matching local password record
    match_query = self.model.user == user['id']

    # if the user has already been registered
    if self.model.select().where(match_query).count() > 0:
        # yell loudly
        raise RuntimeError('The user is already registered.')

    # create an entry in the user password table
    # NOTE(review): the password appears to be stored as passed in --
    # confirm that self.model hashes it; also this rebinding shadows the
    # `password` argument for the rest of the function.
    password = self.model(user=user['id'], password=password)
    # save it to the database
    password.save()

    # return a dictionary with the user we created and a session token for later use
    return {
        'user': user,
        'sessionToken': self._user_session_token(user)
    }
769,094
This function checks if there is a user with the same uid in the remote user service Args: **kwds : the filters of the user to check for Returns: (bool): whether or not there is a matching user
async def _check_for_matching_user(self, **user_filters):
    """Check whether the remote user service has a user matching the filters.

    Args:
        **user_filters: the filters of the user to check for

    Returns:
        (bool): whether or not there is a matching user
    """
    # Bug fix: the lookup coroutine was previously called without `await`,
    # so the truth test ran against the coroutine object, not the reply.
    # NOTE(review): the filters were passed as one positional dict before;
    # forwarding them as keywords matches the sibling remote helpers --
    # confirm _get_matching_user's signature.
    user_data = await self._get_matching_user(**user_filters)
    # a match exists when the query produced no errors and at least one row
    return bool(not user_data['errors'] and len(user_data['data'][root_query()]))
769,100
This method creates a service record in the remote user service with the given email. Args: uid (str): the user identifier to create Returns: (dict): a summary of the user that was created
async def _create_remote_user(self, **payload):
    """Create a user record in the remote user service.

    Args:
        **payload: the fields of the user to create.

    Returns:
        (dict): a summary of the user that was created.
    """
    # ask the remote service to create the record and wait for its reply
    reply = await self.event_broker.ask(
        action_type=get_crud_action(method='create', model='user'),
        payload=payload
    )
    # the reply arrives as a json-encoded summary of the new user
    return json.loads(reply)
769,101
Load datamat at path. Parameters: path : string Absolute path of the file to load from.
def load(path, variable='Datamat'):
    """Load a Datamat from an hdf5 file.

    Parameters:
        path : string
            Absolute path of the file to load from.
        variable : string
            Name of the hdf5 group holding the datamat, default 'Datamat'.
    """
    # the File object is a context manager, so it is closed even if
    # fromhdf5 raises -- same guarantee as the original try/finally
    with h5py.File(path, 'r') as handle:
        return fromhdf5(handle[variable])
769,770
Saves Datamat to path. Parameters: path : string Absolute path of the file to save to.
def save(self, path):
    """Save this Datamat to an hdf5 file.

    Plain fields become datasets of a 'Datamat' group; fields that h5py
    cannot store directly (object arrays of dicts of numpy arrays) are
    expanded into nested groups. Parameters become group attributes.

    Parameters:
        path : string
            Absolute path of the file to save to.
    """
    f = h5py.File(path, 'w')
    try:
        fm_group = f.create_group('Datamat')
        for field in self.fieldnames():
            try:
                fm_group.create_dataset(field, data = self.__dict__[field])
            except TypeError:
                # Assuming field is an object array that contains dicts which
                # contain numpy arrays as values: store one sub-group per
                # element, one dataset per dict entry.
                # Bug fix: removed a leftover debug print((field, d)) that
                # dumped every element to stdout during save.
                sub_group = fm_group.create_group(field)
                for i, d in enumerate(self.__dict__[field]):
                    index_group = sub_group.create_group(str(i))
                    for key, value in d.items():
                        index_group.create_dataset(key, data=value)
        for param in self.parameters():
            fm_group.attrs[param]=self.__dict__[param]
    finally:
        f.close()
769,778
Returns an iterator that iterates over unique values of field Parameters: field : string Filters the datamat for every unique value in field and yields the filtered datamat. Returns: datamat : Datamat that is filtered according to one of the unique values in 'field'.
def by_field(self, field):
    """Iterate over the datamat filtered by each unique value of *field*.

    Parameters:
        field : string
            Filters the datamat for every unique value in field and yields
            the filtered datamat.
    Returns:
        datamat : Datamat that is filtered according to one of the unique
            values in 'field'.
    """
    # read the column once; np.unique yields the values in sorted order
    column = self.__dict__[field]
    for value in np.unique(column):
        yield self.filter(column == value)
769,780
Add a new field to the datamat. Parameters: name : string Name of the new field data : list Data for the new field, must be same length as all other fields.
def add_field(self, name, data):
    """Add a new field to the datamat.

    Parameters:
        name : string
            Name of the new field
        data : list
            Data for the new field, must be same length as all other fields.

    Raises:
        ValueError: if the field already exists or the data length does
            not match the number of entries in the datamat.
    """
    # Improvement: the original raised bare ValueError with no message,
    # which made failures hard to diagnose. The exception type is unchanged.
    if name in self._fields:
        raise ValueError("Field '%s' already exists" % name)
    if not len(data) == self._num_fix:
        raise ValueError('Data for field %r must have %d entries, got %d'
                         % (name, self._num_fix, len(data)))
    self._fields.append(name)
    self.__dict__[name] = data
769,783
Remove a field from the datamat. Parameters: name : string Name of the field to be removed
def rm_field(self, name):
    """Remove a field from the datamat.

    Parameters:
        name : string
            Name of the field to be removed

    Raises:
        ValueError: if no field of that name exists.
    """
    if name not in self._fields:
        raise ValueError
    # drop both the registry entry and the attribute holding the data
    self._fields.remove(name)
    self.__dict__.pop(name)
769,786
Computes the distribution of angle and length combinations that were made as first saccades Parameters: fm : ocupy.fixmat The fixation data to be analysed
def firstSacDist(fm):
    """Compute the joint distribution of angle and length of *first* saccades.

    Selects, for every trajectory, the saccade that follows the first
    fixation (the np.roll(..., 1) shifts the "fix == min(fix)" mask one
    step forward) and histograms its length (in degrees) against its angle.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed.

    Returns:
        The 2D histogram produced by makeHist (length bins x 1-degree
        angle bins from -180 to 180).
    """
    ang, leng, ad, ld = anglendiff(fm, return_abs=True)
    # lengths of the saccades right after each trajectory's first fixation,
    # converted from pixels to visual degrees
    y_arg = leng[0][np.roll(fm.fix == min(fm.fix), 1)]/fm.pixels_per_degree
    # corresponding angles, wrapped into (-180, 180]
    x_arg = reshift(ang[0][np.roll(fm.fix == min(fm.fix), 1)])
    # 1-degree-of-amplitude bins up to the largest observed length,
    # 1-degree angular bins across the full circle
    bins = [list(range(int(ceil(np.nanmax(y_arg)))+1)), np.linspace(-180, 180, 361)]
    return makeHist(x_arg, y_arg, fit=None, bins = bins)
769,797
Computes the distribution of trajectory lengths, i.e. the number of saccades that were made as a part of one trajectory Parameters: fm : ocupy.fixmat The fixation data to be analysed
def trajLenDist(fm):
    """Compute the distribution of trajectory lengths.

    A trajectory's length is the number of saccades it contains; it is
    read off as the fixation index of the last fixation preceding each
    trajectory start.

    Parameters:
        fm : ocupy.fixmat
            The fixation data to be analysed.

    Returns:
        (cumsum, borders): the cumulative length distribution and the
        histogram bin borders.
    """
    # fixation number just before each new trajectory begins
    lengths = np.roll(fm.fix, 1)[fm.fix == min(fm.fix)]
    # integer-centered bins: [-0.5, 0.5, ..., max+0.5]
    edges = np.linspace(-0.5, max(lengths) + 0.5, max(lengths) + 2)
    counts, borders = np.histogram(lengths, bins=edges)
    cumsum = np.cumsum(counts.astype(float) / counts.sum())
    return cumsum, borders
769,798
Prepares the data to be replicated. Calculates the second-order length and angle dependencies between saccades and stores them in a fitted histogram. Parameters: fit : function, optional The method to use for fitting the histogram full_H1 : twodimensional numpy.ndarray, optional Where applicable, the distribution of angle and length differences to replicate with dimensions [73,361]
def initializeData(self, fit = None, full_H1=None, max_length = 40, in_deg = True):
    """Prepare the saccade data needed for trajectory generation.

    Computes second-order length/angle dependencies between consecutive
    saccades, bins them into per-length 2D histograms (self.full_H1),
    and precomputes the cumulative distributions and lookup grids used
    by the sampler.

    Parameters:
        fit : function, optional
            The method to use for fitting the histogram.
        full_H1 : list of 2D numpy.ndarray, optional
            Precomputed angle/length-difference histograms to reuse.
        max_length : int
            Largest saccade amplitude (in degrees) that gets its own histogram.
        in_deg : bool
            If True, treat coordinates as already being in degrees
            (pixels_per_degree is forced to 1).
    """
    a, l, ad, ld = anglendiff(self.fm, roll=1, return_abs = True)
    if in_deg:
        # coordinates are already degrees; neutralize the conversion factor
        self.fm.pixels_per_degree = 1

    # samples rows: [0] current length, [1] next length, [2] next angle diff
    samples = np.zeros([3, len(l[0])])
    samples[0] = l[0]/self.fm.pixels_per_degree
    samples[1] = np.roll(l[0]/self.fm.pixels_per_degree,-1)
    samples[2] = np.roll(reshift(ad[0]),-1)
    # NOTE(review): `z` is computed but never used -- candidate for removal
    z = np.any(np.isnan(samples), axis=0)
    # drop any column containing a NaN
    samples = samples[:,~np.isnan(samples).any(0)]

    if full_H1 is None:
        self.full_H1 = []
        # one histogram per 1-degree band of current saccade length
        for i in range(1, int(ceil(max_length+1))):
            idx = np.logical_and(samples[0]<=i, samples[0]>i-1)
            if idx.any():
                self.full_H1.append(makeHist(samples[2][idx], samples[1][idx],
                    fit=fit, bins=[np.linspace(0,max_length-1,max_length),np.linspace(-180,180,361)]))
                # Sometimes if there's only one sample present there seems to occur a problem
                # with histogram calculation and the hist is filled with nans. In this case, dismiss
                # the hist.
                if np.isnan(self.full_H1[-1]).any():
                    self.full_H1[-1] = np.array([])
                self.nosamples.append(len(samples[2][idx]))
            else:
                # no saccade fell into this length band
                self.full_H1.append(np.array([]))
                self.nosamples.append(0)
    else:
        self.full_H1 = full_H1

    # distribution of the very first saccade of each trajectory
    self.firstLenAng_cumsum, self.firstLenAng_shape = (
        compute_cumsum(firstSacDist(self.fm)))
    # flatten each histogram into a cumulative distribution for sampling
    # NOTE(review): `self.full_H1[i] == []` compares an ndarray to a list;
    # this only behaves as "is empty" for 0-element arrays -- confirm.
    self.probability_cumsum = []
    for i in range(len(self.full_H1)):
        if self.full_H1[i] == []:
            self.probability_cumsum.append(np.array([]))
        else:
            self.probability_cumsum.append(np.cumsum(self.full_H1[i].flat))
    self.trajLen_cumsum, self.trajLen_borders = trajLenDist(self.fm)
    # resolution needed to distinguish neighbouring cumsum values
    min_distance = 1/np.array([min((np.unique(self.probability_cumsum[i]) \
            -np.roll(np.unique(self.probability_cumsum[i]),1))[1:]) \
            for i in range(len(self.probability_cumsum))])
    # Set a minimal resolution
    min_distance[min_distance<10] = 10
    # lookup grids used for inverse-CDF sampling
    self.linind = {}
    for i in range(len(self.probability_cumsum)):
        self.linind['self.probability_cumsum '+repr(i)] = np.linspace(0,1,min_distance[i])[0:-1]
    # NOTE(review): here the cumsum object itself is used as the dict key --
    # verify these objects are hashable in this code path.
    for elem in [self.firstLenAng_cumsum, self.trajLen_cumsum]:
        self.linind[elem] = np.linspace(0, 1,
            1/min((np.unique((elem))-np.roll(np.unique((elem)),1))[1:]))[0:-1]
769,802
Calculates the coordinates after a specific saccade was made. Parameters: (x,y) : tuple of floats or ints The coordinates before the saccade was made angle : float or int The angle that the next saccade encloses with the horizontal display border length: float or int The length of the next saccade
def _calc_xy(self, xxx_todo_changeme, angle, length): (x, y) = xxx_todo_changeme return (x+(cos(radians(angle))*length), y+(sin(radians(angle))*length))
769,803
Generates a given number of trajectories, using the method sample(). Returns a fixmat with the generated data. Parameters: num_samples : int, optional The number of trajectories that shall be generated.
def sample_many(self, num_samples = 2000):
    """Generate *num_samples* trajectories via sample() and pack them in a fixmat.

    Parameters:
        num_samples : int, optional
            The number of trajectories that shall be generated.

    Returns:
        A fixmat whose x/y columns hold the sampled coordinates, with
        'fix' counting the fixation within its trajectory (1-based) and
        the trajectory index recorded per sample.
    """
    x = []
    y = []
    fix = []
    sample = []
    # XXX: Delete ProgressBar
    pbar = ProgressBar(widgets=[Percentage(),Bar()], maxval=num_samples).start()
    for s in range(0, num_samples):
        # one trajectory: an ordered sequence of (x, y) fixation points
        for i, (xs, ys) in enumerate(self.sample()):
            x.append(xs)
            y.append(ys)
            fix.append(i+1)
            sample.append(s)
        pbar.update(s+1)
    fields = {'fix':np.array(fix), 'y':np.array(y), 'x':np.array(x)}
    param = {'pixels_per_degree':self.fm.pixels_per_degree}
    out = fixmat.VectorFixmatFactory(fields, param)
    return out
769,806
Load fixmat at path. Parameters: path : string Absolute path of the file to load from.
def load(path):
    """Load a fixmat from an hdf5 file.

    Parameters:
        path : string
            Absolute path of the file to load from.

    Returns:
        A fixmat built from the 'Fixmat' group (or the legacy 'Datamat'
        group) of the file.
    """
    handle = h5py.File(path,'r')
    # prefer the modern group name, fall back to the legacy one
    group_name = 'Fixmat' if 'Fixmat' in handle else 'Datamat'
    fm_group = handle[group_name]
    # datasets become per-fixation fields, attributes become parameters
    fields = {name: np.array(dataset) for name, dataset in fm_group.items()}
    params = {name: value for name, value in fm_group.attrs.items()}
    handle.close()
    return VectorFixmatFactory(fields, params)
769,809
Computes the relative bias, i.e. the distribution of saccade angles and amplitudes. Parameters: fm : DataMat The fixation data to use scale_factor : double Returns: 2D probability distribution of saccade angles and amplitudes.
def relative_bias(fm, scale_factor = 1, estimator = None):
    """Compute the relative bias: the 2D distribution of saccade vectors.

    Parameters:
        fm : DataMat
            The fixation data to use.
        scale_factor : double
            Scales both the displacement samples and the histogram extent.
        estimator : callable, optional
            Alternative density estimator with signature
            (samples, e_y, e_x); default is a plain 2D histogram.

    Returns:
        2D probability distribution of saccade angles and amplitudes.
    """
    assert 'fix' in fm.fieldnames(), "Can not work without fixation numbers"
    # exclude pairs that do not form consecutive fixations within a trial
    excl = fm.fix - np.roll(fm.fix, 1) != 1

    # Now calculate the direction where the NEXT fixation goes to
    diff_x = (np.roll(fm.x, 1) - fm.x)[~excl]
    diff_y = (np.roll(fm.y, 1) - fm.y)[~excl]

    # Make a histogram of diff values
    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    ylim = np.round(scale_factor * fm.image_size[0])
    xlim = np.round(scale_factor * fm.image_size[1])
    # force an even number of bin edges in each direction
    x_steps = np.ceil(2*xlim) +1
    if x_steps % 2 != 0: x_steps+=1
    y_steps = np.ceil(2*ylim)+1
    if y_steps % 2 != 0: y_steps+=1
    e_x = np.linspace(-xlim,xlim,x_steps)
    e_y = np.linspace(-ylim,ylim,y_steps)

    #e_y = np.arange(-ylim, ylim+1)
    #e_x = np.arange(-xlim, xlim+1)

    samples = np.array(list(zip((scale_factor * diff_y),
                                 (scale_factor* diff_x))))
    if estimator == None:
        (hist, _) = np.histogramdd(samples, (e_y, e_x))
    else:
        hist = estimator(samples, e_y, e_x)
    return hist
769,811
Loads a single fixmat (fixmatfile). Parameters: fixmatfile : string The matlab fixmat that should be loaded. categories : instance of stimuli.Categories, optional Links data in categories to data in fixmat.
def FixmatFactory(fixmatfile, categories = None, var_name = 'fixmat', field_name='x'):
    """Load a single fixmat from a matlab file.

    Parameters:
        fixmatfile : string
            The matlab fixmat that should be loaded.
        categories : instance of stimuli.Categories, optional
            Links data in categories to data in fixmat.
        var_name : string
            Name of the struct variable inside the .mat file.
        field_name : string
            Field whose length defines the number of fixations.

    Raises:
        RuntimeError: if var_name is not a variable of the .mat file.
    """
    try:
        data = loadmat(fixmatfile, struct_as_record = False)
        keys = list(data.keys())
        data = data[var_name][0][0]
    except KeyError:
        # Bug fix: the original applied '%' only to the second string
        # fragment, so the leading '%s' stayed unfilled and the words ran
        # together ("PossibleKeys"). Interpolate both values properly.
        raise RuntimeError('%s is not a field of the matlab structure. '
                           'Possible keys are %s' % (var_name, str(keys)))

    num_fix = data.__getattribute__(field_name).size

    # Partition struct attributes: per-fixation vectors become fields,
    # everything else becomes a parameter.
    fields = {}
    parameters = {}
    for field in data._fieldnames:
        if data.__getattribute__(field).size == num_fix:
            fields[field] = data.__getattribute__(field)
        else:
            parameters[field] = data.__getattribute__(field)[0].tolist()
            if len(parameters[field]) == 1:
                # unwrap single-element parameter lists to scalars
                parameters[field] = parameters[field][0]

    # Generate FixMat
    fixmat = FixMat(categories = categories)
    fixmat._fields = list(fields.keys())
    for (field, value) in list(fields.items()):
        fixmat.__dict__[field] = value.reshape(-1,)

    fixmat._parameters = parameters
    fixmat._subjects = None
    for (field, value) in list(parameters.items()):
        fixmat.__dict__[field] = value
    fixmat._num_fix = num_fix
    return fixmat
769,813
Constructs an categories object for all image / category combinations in the fixmat. Parameters: fm: FixMat Used for extracting valid category/image combination. loader: loader Loader that accesses the stimuli for this fixmat Returns: Categories object
def FixmatStimuliFactory(fm, loader):
    """Construct a Categories object for all image/category pairs in the fixmat.

    Parameters:
        fm: FixMat
            Used for extracting valid category/image combinations.
        loader: loader
            Loader that accesses the stimuli for this fixmat.

    Returns:
        Categories object

    Raises:
        ValueError: if a category or image referenced by the fixmat cannot
            be located by the loader.
        RuntimeError: if a feature file for an image cannot be located.
    """
    # Find all feature names
    features = []
    if loader.ftrpath:
        assert os.access(loader.ftrpath, os.R_OK)
        features = os.listdir(os.path.join(loader.ftrpath, str(fm.category[0])))
    # Find all images in all categories
    img_per_cat = {}
    for cat in np.unique(fm.category):
        if not loader.test_for_category(cat):
            raise ValueError('Category %s is specified in fixmat but '%(
                str(cat) + 'can not be located by loader'))
        img_per_cat[cat] = []
        for img in np.unique(fm[(fm.category == cat)].filenumber):
            if not loader.test_for_image(cat, img):
                # Bug fix: the message previously read "can be located by
                # loader" -- the exact opposite of the condition it reports.
                raise ValueError('Image %s in category %s is '%(str(cat),
                    str(img)) + 'specified in fixmat but can not be located by loader')
            img_per_cat[cat].append(img)
            if loader.ftrpath:
                for feature in features:
                    if not loader.test_for_feature(cat, img, feature):
                        raise RuntimeError(
                            'Feature %s for image %s' %(str(feature),str(img))
                            + ' in category %s ' %str(cat)
                            + 'can not be located by loader')
    return Categories(loader, img_per_cat = img_per_cat, features = features,
        fixations = fm)
769,831
Computes Chao-Shen corrected KL-divergence between prediction and fdm made from fixations in fm. Parameters : prediction : np.ndarray a fixation density map fm : FixMat object
def kldiv_cs_model(prediction, fm):
    """Compute Chao-Shen corrected KL-divergence between a prediction and a FDM.

    The fixation density map (fdm) is histogrammed from the fixations in
    *fm* at the prediction's resolution; the divergence is then the
    Chao-Shen-corrected cross entropy minus the corrected entropy.

    Parameters :
        prediction : np.ndarray
            a fixation density map
        fm : FixMat object

    Returns:
        float: the corrected KL-divergence in bits, or NaN if *fm* holds
        no fixations.
    """
    # compute histogram of fixations needed for ChaoShen corrected kl-div
    # image category must exist (>-1) and image_size must be non-empty
    assert(len(fm.image_size) == 2 and (fm.image_size[0] > 0) and
        (fm.image_size[1] > 0))
    assert(-1 not in fm.category)
    # check whether fixmat contains fixations
    if len(fm.x) ==  0:
        return np.NaN

    # rescale fixation coordinates to the prediction's resolution
    (scale_factor, _) = calc_resize_factor(prediction, fm.image_size)
    # this specifies left edges of the histogram bins, i.e. fixations between
    # ]0 binedge[0]] are included. --> fixations are ceiled
    e_y = np.arange(0, np.round(scale_factor*fm.image_size[0]+1))
    e_x = np.arange(0, np.round(scale_factor*fm.image_size[1]+1))
    samples = np.array(list(zip((scale_factor*fm.y), (scale_factor*fm.x))))
    (fdm, _) = np.histogramdd(samples, (e_y, e_x))

    # compute ChaoShen corrected kl-div
    q = np.array(prediction, copy = True)
    # avoid log(0): replace exact zeros with the smallest positive float
    q[q == 0] = np.finfo(q.dtype).eps
    q /= np.sum(q)
    (H, pa, la) = chao_shen(fdm)
    # restrict to cells that actually received fixations
    q = q[fdm > 0]
    cross_entropy = -np.sum((pa * np.log2(q)) / la)
    return (cross_entropy - H)
769,915
approximates the area under the roc curve for sets of actuals and controls. Uses all values appearing in actuals as thresholds and lower sum interpolation. Also returns arrays of the true positive rate and the false positive rate that can be used for plotting the roc curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations.
def fast_roc(actuals, controls):
    """Approximate the area under the ROC curve for actuals vs. controls.

    Every value appearing in actuals serves as a threshold; the area is
    integrated with lower-sum interpolation. The returned rate arrays can
    be used for plotting the curve.

    Parameters:
        actuals : np.ndarray
            Numeric values for positive observations.
        controls : np.ndarray
            Numeric values for negative observations.

    Returns:
        (auc, true_pos_rate, false_pos_rate)
    """
    assert(type(actuals) is np.ndarray)
    assert(type(controls) is np.ndarray)
    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')
    # thresholds from +inf down to -inf so the rates grow monotonically
    thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1]
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    true_pos_rate = np.array(
        [(actuals >= t).sum() / num_act for t in thresholds])
    false_pos_rate = np.array(
        [(controls >= t).sum() / num_ctr for t in thresholds])
    # lower-sum integration over the FPR axis
    auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1 - true_pos_rate[-3]) * .5 * (1 - false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
769,921
Histogram-based implementation of the AUC under the ROC curve. Parameters: actuals : list A list of numeric values for positive observations. controls : list A list of numeric values for negative observations.
def faster_roc(actuals, controls):
    """Histogram-based approximation of the AUC under the ROC curve.

    Bins both samples once at the unique actuals values instead of
    scanning per threshold; refuses to run on fewer than 500 actuals
    (see the guard below).

    Parameters:
        actuals : np.ndarray
            Numeric values for positive observations.
        controls : np.ndarray
            Numeric values for negative observations.

    Returns:
        (auc, true_pos_rate, false_pos_rate)
    """
    assert(type(actuals) is np.ndarray)
    assert(type(controls) is np.ndarray)
    if len(actuals)<500:
        raise RuntimeError('This method might be incorrect when '+
                'not enough actuals are present. Needs to be checked before '+
                'proceeding. Stopping here for you to do so.')
    actuals = np.ravel(actuals)
    controls = np.ravel(controls)
    if np.isnan(actuals).any():
        raise RuntimeError('NaN found in actuals')
    if np.isnan(controls).any():
        raise RuntimeError('NaN found in controls')
    # eps nudges the edges so values equal to a threshold fall left of it
    thresholds = np.hstack([-np.inf,
        np.unique(actuals), np.inf])+np.finfo(float).eps
    # NOTE(review): the next two assignments are immediately overwritten
    # below -- dead code, kept as-is.
    true_pos_rate = np.nan*np.empty(thresholds.size-1)
    false_pos_rate = np.nan*np.empty(thresholds.size-1)
    num_act = float(len(actuals))
    num_ctr = float(len(controls))
    # complementary cumulative histograms give the rates per threshold
    actuals = 1-(np.cumsum(np.histogram(actuals, thresholds)[0])/num_act)
    controls = 1-(np.cumsum(np.histogram(controls, thresholds)[0])/num_ctr)
    true_pos_rate = actuals
    false_pos_rate = controls
    #true_pos_rate = np.concatenate(([0], true_pos_rate, [1]))
    false_pos_rate = false_pos_rate
    # rates run in the opposite direction here, hence the sign flip
    auc = -1*np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
    # treat cases where TPR of one is not reached before FPR of one
    # by using trapezoidal integration for the last segment
    # (add the missing triangle)
    if false_pos_rate[-2] == 1:
        auc += ((1-true_pos_rate[-3])*.5*(1-false_pos_rate[-3]))
    return (auc, true_pos_rate, false_pos_rate)
769,922
Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple.
def insort_event_right(self, event, lo=0, hi=None):
    """Insert *event* into the (sorted) queue, keeping it sorted.

    If events with the same time already exist, the new one goes to the
    right of the rightmost of them, preserving FIFO order. Optional *lo*
    (default 0) and *hi* (default len(queue)) bound the searched slice.

    Args:
        event: a (time in sec since unix epoch, callback, args, kwds) tuple.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(self.queue)
    # binary search on the scheduled time only (event[0]); ties move the
    # lower bound right so equal-time events keep arrival order
    while lo < hi:
        mid = (lo + hi) // 2
        if self.queue[mid][0] <= event[0]:
            lo = mid + 1
        else:
            hi = mid
    self.queue.insert(lo, event)
770,403
Helper for GQL parsing to extract values from GQL expressions. This can extract the value from a GQL literal, return a Parameter for a GQL bound parameter (:1 or :foo), and interprets casts like KEY(...) and plain lists of values like (1, 2, 3). Args: func: A string indicating what kind of thing this is. args: One or more GQL values, each integer, string, or GQL literal.
def _args_to_val(func, args): from .google_imports import gql # Late import, to avoid name conflict. vals = [] for arg in args: if isinstance(arg, (int, long, basestring)): val = Parameter(arg) elif isinstance(arg, gql.Literal): val = arg.Get() else: raise TypeError('Unexpected arg (%r)' % arg) vals.append(val) if func == 'nop': if len(vals) != 1: raise TypeError('"nop" requires exactly one value') return vals[0] # May be a Parameter pfunc = ParameterizedFunction(func, vals) if pfunc.is_parameterized(): return pfunc else: return pfunc.resolve({}, {})
770,410
Helper for GQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model class doesn't derive from Expando.
def _get_prop_from_modelclass(modelclass, name):
    """Helper for GQL parsing to turn a property name into a Property object.

    Args:
        modelclass: The model class specified in the query.
        name: The property name. This may contain dots which indicate
            sub-properties of structured properties.

    Returns:
        A Property object.

    Raises:
        TypeError: if a path component does not name a property and the
            class is not an Expando.
        KeyError: if a structured sub-property doesn't exist and the
            sub-model is not an Expando.
    """
    if name == '__key__':
        return modelclass._key

    # walk the dotted path one component at a time
    parts = name.split('.')
    part, more = parts[0], parts[1:]
    prop = modelclass._properties.get(part)
    if prop is None:
        if issubclass(modelclass, model.Expando):
            # Expando accepts arbitrary property names
            prop = model.GenericProperty(part)
        else:
            raise TypeError('Model %s has no property named %r' %
                            (modelclass._get_kind(), part))

    while more:
        part = more.pop(0)
        # only StructuredProperty can have sub-properties
        if not isinstance(prop, model.StructuredProperty):
            raise TypeError('Model %s has no property named %r' %
                            (modelclass._get_kind(), part))
        maybe = getattr(prop, part, None)
        if isinstance(maybe, model.Property) and maybe._name == part:
            prop = maybe
        else:
            maybe = prop._modelclass._properties.get(part)
            if maybe is not None:
                # Must get it this way to get the copy with the long name.
                # (See StructuredProperty.__getattr__() for details.)
                prop = getattr(prop, maybe._code_name)
            else:
                if issubclass(prop._modelclass, model.Expando) and not more:
                    prop = model.GenericProperty()
                    prop._name = name  # Bypass the restriction on dots.
                else:
                    raise KeyError('Model %s has no property named %r' %
                                   (prop._modelclass._get_kind(), part))

    return prop
770,411
Parse a GQL query string. Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. *args, **kwds: If present, used to call bind(). Returns: An instance of query_class.
def gql(query_string, *args, **kwds):
    """Parse a GQL query string into a query object.

    Args:
        query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.
        *args, **kwds: If present, used to call bind().

    Returns:
        An instance of query_class.
    """
    qry = _gql(query_string)
    if not (args or kwds):
        return qry
    # bind positional (:1) and named (:foo) parameters
    return qry._bind(args, kwds)
770,412
Parse a GQL query string (internal version). Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. query_class: Optional class to use, default Query. Returns: An instance of query_class.
def _gql(query_string, query_class=Query):
  """Parse a GQL query string (internal version).

  Args:
    query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.
    query_class: Optional class to use, default Query.

  Returns:
    An instance of query_class.
  """
  from .google_imports import gql  # Late import, to avoid name conflict.
  gql_qry = gql.GQL(query_string)
  kind = gql_qry.kind()
  if kind is None:
    # The query must be lacking a "FROM <kind>" class.  Let Expando
    # stand in for the model class (it won't actually be used to
    # construct the results).
    modelclass = model.Expando
  else:
    modelclass = model.Model._lookup_model(
        kind, tasklets.get_context()._conn.adapter.default_model)
    # Adjust kind to the kind of the model class.
    kind = modelclass._get_kind()
  ancestor = None
  flt = gql_qry.filters()
  # Start with the model's default filters (e.g. class filters).
  filters = list(modelclass._default_filters())
  for name_op in sorted(flt):
    name, op = name_op
    values = flt[name_op]
    op = op.lower()
    # 'ANCESTOR IS <key>' is handled specially, not as a property filter.
    if op == 'is' and name == gql.GQL._GQL__ANCESTOR:
      if len(values) != 1:
        raise ValueError('"is" requires exactly one value')
      [(func, args)] = values
      ancestor = _args_to_val(func, args)
      continue
    if op not in _OPS:
      raise NotImplementedError('Operation %r is not supported.' % op)
    for (func, args) in values:
      val = _args_to_val(func, args)
      prop = _get_prop_from_modelclass(modelclass, name)
      if prop._name != name:
        raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) '
                           'returned a property whose name is %r?!'
                           % (modelclass.__name__, name, prop._name))
      if isinstance(val, ParameterizedThing):
        # Defer evaluation until parameters are bound.
        node = ParameterNode(prop, op, val)
      elif op == 'in':
        node = prop._IN(val)
      else:
        node = prop._comparison(op, val)
      filters.append(node)
  if filters:
    filters = ConjunctionNode(*filters)
  else:
    filters = None
  orders = _orderings_to_orders(gql_qry.orderings(), modelclass)
  offset = gql_qry.offset()
  limit = gql_qry.limit()
  if limit < 0:
    # GQL uses -1 to mean "no limit".
    limit = None
  keys_only = gql_qry._keys_only
  if not keys_only:
    keys_only = None
  options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only)
  projection = gql_qry.projection()
  if gql_qry.is_distinct():
    # DISTINCT means group by all projected properties.
    group_by = projection
  else:
    group_by = None
  qry = query_class(kind=kind,
                    ancestor=ancestor,
                    filters=filters,
                    orders=orders,
                    default_options=options,
                    projection=projection,
                    group_by=group_by)
  return qry
770,413
Constructor. Args: key: The Parameter key, must be either an integer or a string.
def __init__(self, key):
  """Constructor.

  Args:
    key: The Parameter key, must be either an integer or a string.

  Raises:
    TypeError: if key is neither an integer nor a string.
  """
  # NOTE: Python 2 types -- long and basestring cover the int/str families.
  if not isinstance(key, (int, long, basestring)):
    raise TypeError('Parameter key must be an integer or string, not %s' %
                    (key,))
  # Name-mangled to keep the key immutable from outside the class.
  self.__key = key
770,419
Constructor. Args: kind: Optional kind string. ancestor: Optional ancestor Key. filters: Optional Node representing a filter expression tree. orders: Optional datastore_query.Order object. app: Optional app id. namespace: Optional namespace. default_options: Optional QueryOptions object. projection: Optional list or tuple of properties to project. group_by: Optional list or tuple of properties to group by.
def __init__(self, kind=None, ancestor=None, filters=None, orders=None,
             app=None, namespace=None, default_options=None,
             projection=None, group_by=None):
  """Constructor.

  Args:
    kind: Optional kind string.
    ancestor: Optional ancestor Key (or a GQL KEY() parameter).
    filters: Optional Node representing a filter expression tree.
    orders: Optional datastore_query.Order object.
    app: Optional app id.
    namespace: Optional namespace.
    default_options: Optional QueryOptions object.
    projection: Optional list or tuple of properties to project.
    group_by: Optional list or tuple of properties to group by.

  Raises:
    TypeError: on most argument-type violations.
    ValueError: if ancestor is an incomplete key.
  """
  # TODO(arfuller): Accept projection=Model.key to mean keys_only.
  # TODO(arfuller): Consider adding incremental function
  # group_by_property(*args) and project(*args, distinct=False).

  # Validating input.
  if ancestor is not None:
    if isinstance(ancestor, ParameterizedThing):
      # A parameterized ancestor is only legal as the GQL KEY() function.
      if isinstance(ancestor, ParameterizedFunction):
        if ancestor.func != 'key':
          raise TypeError('ancestor cannot be a GQL function other than KEY')
    else:
      if not isinstance(ancestor, model.Key):
        raise TypeError('ancestor must be a Key; received %r' % (ancestor,))
      if not ancestor.id():
        raise ValueError('ancestor cannot be an incomplete key')
      # app/namespace must agree with the ancestor key when both given.
      if app is not None:
        if app != ancestor.app():
          raise TypeError('app/ancestor mismatch')
      if namespace is None:
        namespace = ancestor.namespace()
      else:
        if namespace != ancestor.namespace():
          raise TypeError('namespace/ancestor mismatch')
  if filters is not None:
    if not isinstance(filters, Node):
      raise TypeError('filters must be a query Node or None; received %r' %
                      (filters,))
  if orders is not None:
    if not isinstance(orders, datastore_query.Order):
      raise TypeError('orders must be an Order instance or None; received %r' %
                      (orders,))
  if default_options is not None:
    if not isinstance(default_options, datastore_rpc.BaseConfiguration):
      raise TypeError('default_options must be a Configuration or None; '
                      'received %r' % (default_options,))
    # projection may come from either place, but not both.
    if projection is not None:
      if default_options.projection is not None:
        raise TypeError('cannot use projection= and '
                        'default_options.projection at the same time')
      if default_options.keys_only is not None:
        raise TypeError('cannot use projection= and '
                        'default_options.keys_only at the same time')

  self.__kind = kind  # String.
  self.__ancestor = ancestor  # Key.
  self.__filters = filters  # None or Node subclass.
  self.__orders = orders  # None or datastore_query.Order instance.
  self.__app = app
  self.__namespace = namespace
  self.__default_options = default_options

  # Checked late as _check_properties depends on local state.
  self.__projection = None
  if projection is not None:
    if not projection:
      raise TypeError('projection argument cannot be empty')
    if not isinstance(projection, (tuple, list)):
      raise TypeError(
          'projection must be a tuple, list or None; received %r' %
          (projection,))
    self._check_properties(self._to_property_names(projection))
    self.__projection = tuple(projection)

  self.__group_by = None
  if group_by is not None:
    if not group_by:
      raise TypeError('group_by argument cannot be empty')
    if not isinstance(group_by, (tuple, list)):
      raise TypeError(
          'group_by must be a tuple, list or None; received %r' %
          (group_by,))
    self._check_properties(self._to_property_names(group_by))
    self.__group_by = tuple(group_by)
770,444
An auto-batching wrapper for memcache.get() or .get_multi(). Args: key: Key to fetch. This must be a string; no prefix is applied. for_cas: If True, request and store CAS ids on the Context. namespace: Optional namespace. deadline: Optional deadline for the RPC. Returns: A Future (!) whose return value is the value retrieved from memcache, or None.
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False,
                 deadline=None):
  """An auto-batching wrapper for memcache.get() or .get_multi().

  Args:
    key: Key to fetch.  This must be a string; no prefix is applied.
    for_cas: If True, request and store CAS ids on the Context.
    namespace: Optional namespace.
    use_cache: If True, go through the batcher's add_once() path
      (presumably dedupes identical in-flight requests -- confirm).
    deadline: Optional deadline for the RPC.

  Returns:
    A Future (!) whose return value is the value retrieved from
    memcache, or None.
  """
  if not isinstance(key, basestring):  # Python 2: str or unicode.
    raise TypeError('key must be a string; received %r' % key)
  if not isinstance(for_cas, bool):
    raise TypeError('for_cas must be a bool; received %r' % for_cas)
  if namespace is None:
    namespace = namespace_manager.get_namespace()
  # These three values determine which batch a request may join.
  options = (for_cas, namespace, deadline)
  batcher = self.memcache_get_batcher
  if use_cache:
    return batcher.add_once(key, options)
  else:
    return batcher.add(key, options)
770,482
Init. Args: todo_tasklet: the tasklet that actually fires RPC and waits on a MultiRPC. It should take a list of (future, arg) pairs and an "options" as arguments. "options" are rpc options. limit: max number of items to batch for each distinct value of "options".
def __init__(self, todo_tasklet, limit):
  """Initialize the batcher.

  Args:
    todo_tasklet: the tasklet that actually fires the RPC and waits on a
      MultiRPC.  It is called with a list of (future, arg) pairs and an
      "options" value (rpc options).
    limit: max number of items to batch for each distinct "options" value.
  """
  self._todo_tasklet = todo_tasklet
  self._limit = limit
  # Pending work, keyed by "options"; each value is a list of
  # (future, arg) pairs where future is returned by a single add() call.
  self._queues = {}
  # Futures of todo_tasklet invocations currently in flight.
  self._running = []
  # Per-key result cache; presumably backs add_once() -- confirm.
  self._cache = {}
770,514
Adds an arg and gets back a future. Args: arg: one argument for _todo_tasklet. options: rpc options. Returns: An instance of future, representing the result of running _todo_tasklet on this arg as part of a batch.
def add(self, arg, options=None):
  """Queue one argument and get back a future for its individual result.

  Args:
    arg: one argument for _todo_tasklet.
    options: rpc options, used to group compatible requests into a batch.

  Returns:
    A Future representing the result of _todo_tasklet for this arg.
  """
  fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options))
  if options not in self._queues:
    utils.logging_debug('AutoBatcher(%s): creating new queue for %r',
                        self._todo_tasklet.__name__, options)
    if not self._queues:
      # First pending queue: schedule a flush on the next idle event.
      eventloop.add_idle(self._on_idle)
    self._queues[options] = []
  queue = self._queues[options]
  queue.append((fut, arg))
  if len(queue) >= self._limit:
    # Queue reached its limit; flush immediately rather than at idle.
    del self._queues[options]
    self.run_queue(options, queue)
  return fut
770,516
Passes exception along. Args: batch_fut: the batch future returned by running todo_tasklet. todo: a list of (fut, arg) pairs; each fut is the future returned by an add() call. If the batch fut was successful, it has already called fut.set_result() on other individual futs. This method only handles when the batch fut encountered an exception.
def _finished_callback(self, batch_fut, todo):
  """Propagate a failed batch's exception to each pending individual future.

  Args:
    batch_fut: the batch future returned by running todo_tasklet.
    todo: list of (fut, arg) pairs from the flushed queue.  On success
      the batch has already set results on these futs; this callback
      only needs to handle the failure case.
  """
  self._running.remove(batch_fut)
  exc = batch_fut.get_exception()
  if exc is None:
    return  # Batch succeeded; nothing to propagate.
  tb = batch_fut.get_traceback()
  for fut, _unused_arg in todo:
    if not fut.done():
      fut.set_exception(exc, tb)
770,519
Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values.
def get_namespaces(start=None, end=None):
  """Return all namespaces in the half-open range [start, end).

  Args:
    start: only return namespaces >= start if start is not None.
    end: only return namespaces < end if end is not None.

  Returns:
    A list of namespace names between the (optional) start and end values.
  """
  query = Namespace.query()
  if start is not None:
    query = query.filter(Namespace.key >= Namespace.key_for_namespace(start))
  if end is not None:
    query = query.filter(Namespace.key < Namespace.key_for_namespace(end))
  return [ns.namespace_name for ns in query]
770,523
Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values.
def get_kinds(start=None, end=None):
  """Return all kinds in the specified range, for the current namespace.

  Args:
    start: only return kinds >= start if start is not None.
    end: only return kinds < end if end is not None.

  Returns:
    A list of kind names between the (optional) start and end values.
  """
  if end == '':
    # No kind name sorts below the empty string, so the range is empty.
    return []
  query = Kind.query()
  if start is not None and start != '':
    query = query.filter(Kind.key >= Kind.key_for_kind(start))
  if end is not None:
    query = query.filter(Kind.key < Kind.key_for_kind(end))
  return [k.kind_name for k in query]
770,524
Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values.
def get_properties_of_kind(kind, start=None, end=None):
  """Return all indexed properties of kind in the specified range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A list of property names of kind between the (optional) start and
    end values.
  """
  if end == '':
    # No property name sorts below the empty string, so the range is empty.
    return []
  query = Property.query(ancestor=Property.key_for_kind(kind))
  if start is not None and start != '':
    query = query.filter(Property.key >= Property.key_for_property(kind, start))
  if end is not None:
    query = query.filter(Property.key < Property.key_for_property(kind, end))
  # keys_only: the property name is encoded in the key, so skip fetching
  # full entities.
  return [Property.key_to_property(k) for k in query.iter(keys_only=True)]
770,525
Return all representations of properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A dictionary mapping property names to its list of representations.
def get_representations_of_kind(kind, start=None, end=None):
  """Return all representations of properties of kind in the given range.

  NOTE: This function does not return unindexed properties.

  Args:
    kind: name of kind whose properties you want.
    start: only return properties >= start if start is not None.
    end: only return properties < end if end is not None.

  Returns:
    A dictionary mapping property names to its list of representations.
  """
  if end == '':
    # No property name sorts below the empty string, so the range is empty.
    return {}
  query = Property.query(ancestor=Property.key_for_kind(kind))
  if start is not None and start != '':
    query = query.filter(Property.key >= Property.key_for_property(kind, start))
  if end is not None:
    query = query.filter(Property.key < Property.key_for_property(kind, end))
  # Renamed loop variable to avoid shadowing the builtin 'property'.
  return {prop.property_name: prop.property_representation for prop in query}
770,526
Return the version of the entity group containing key. Args: key: a key for an entity group whose __entity_group__ key you want. Returns: The version of the entity group containing key. This version is guaranteed to increase on every change to the entity group. The version may increase even in the absence of user-visible changes to the entity group. May return None if the entity group was never written to. On non-HR datastores, this function returns None.
def get_entity_group_version(key):
  """Return the version of the entity group containing key.

  Args:
    key: a key for an entity group whose __entity_group__ key you want.

  Returns:
    The version of the entity group containing key, or None if the
    entity group was never written to.
  """
  entity_group = EntityGroup.key_for_entity_group(key).get()
  return entity_group.version if entity_group else None
770,527
Return the Key for a namespace. Args: namespace: A string giving the namespace whose key is requested. Returns: The Key for the namespace.
def key_for_namespace(cls, namespace):
  """Return the Key for a namespace.

  Args:
    namespace: A string giving the namespace whose key is requested.

  Returns:
    The Key for the namespace.
  """
  # The default (empty) namespace gets a reserved placeholder id since a
  # falsy namespace string cannot be used as a key name directly.
  name = namespace or cls.EMPTY_NAMESPACE_ID
  return model.Key(cls.KIND_NAME, name)
770,528
Return the __property__ key for property of kind. Args: kind: kind whose key is requested. property: property whose key is requested. Returns: The key for property of kind.
def key_for_property(cls, kind, property):
  """Return the __property__ key for property of kind.

  Args:
    kind: kind whose key is requested.
    property: property whose key is requested.

  Returns:
    The key for property of kind.
  """
  # A __property__ key is a child of the corresponding __kind__ key.
  path = (Kind.KIND_NAME, kind, Property.KIND_NAME, property)
  return model.Key(*path)
770,529
Return the kind specified by a given __property__ key. Args: key: key whose kind name is requested. Returns: The kind specified by key.
def key_to_kind(cls, key):
  """Return the kind specified by a given __property__ key.

  Args:
    key: key whose kind name is requested.

  Returns:
    The kind specified by key.
  """
  # A __kind__ key carries the kind name as its own id; a __property__
  # key is a child of its kind's key, so the kind name is on the parent.
  if key.kind() == Kind.KIND_NAME:
    return key.id()
  return key.parent().id()
770,530
Return the key for the entity group containing key. Args: key: a key for an entity group whose __entity_group__ key you want. Returns: The __entity_group__ key for the entity group containing key.
def key_for_entity_group(cls, key):
  """Return the key for the entity group containing key.

  Args:
    key: a key for an entity group whose __entity_group__ key you want.

  Returns:
    The __entity_group__ key for the entity group containing key.
  """
  # The pseudo-entity hangs off the root key of key's entity group.
  root = key.root()
  return model.Key(cls.KIND_NAME, cls.ID, parent=root)
770,531
Helper to construct a ContextOptions object from keyword arguments. Args: ctx_options: A dict of keyword arguments. config_cls: Optional Configuration class to use, default ContextOptions. Note that either 'options' or 'config' can be used to pass another Configuration object, but not both. If another Configuration object is given it provides default values. Returns: A Configuration object, or None if ctx_options is empty.
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
  """Helper to construct a ContextOptions object from keyword arguments.

  Args:
    ctx_options: A dict of keyword arguments.
    config_cls: Optional Configuration class to use, default ContextOptions.

  Note that either 'options' or 'config' can be used to pass another
  Configuration object, but not both.  If another Configuration object is
  given it provides default values.

  Returns:
    A Configuration object, or None if ctx_options is empty.
  """
  if not ctx_options:
    return None
  # Normalize deprecated/aliased option names to their canonical form,
  # mutating ctx_options in place.  Iterate over a snapshot of the keys
  # since we pop entries while looping.
  for key in list(ctx_options):
    canonical = _OPTION_TRANSLATIONS.get(key)
    if not canonical:
      continue
    if canonical in ctx_options:
      raise ValueError('Cannot specify %s and %s at the same time' %
                       (key, canonical))
    ctx_options[canonical] = ctx_options.pop(key)
  return config_cls(**ctx_options)
770,534
Set the context cache policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should be cached. May be None.
def set_cache_policy(self, func):
  """Set the context cache policy function.

  Args:
    func: A function that accepts a Key instance as argument and returns
      a bool indicating if it should be cached.  May be None (restores
      the default policy) or a bool (constant yes/no policy).
  """
  if func is None:
    func = self.default_cache_policy
  elif isinstance(func, bool):
    # Promote a constant flag to a policy function returning that flag.
    constant = func
    func = lambda unused_key: constant
  self._cache_policy = func
770,543
Return whether to use the context cache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached, False otherwise.
def _use_cache(self, key, options=None):
  """Return whether to use the context cache for this key.

  Resolution order: per-call options, then the policy function, then the
  connection's default config; defaults to True if all are None.

  Args:
    key: Key instance.
    options: ContextOptions instance, or None.

  Returns:
    True if the key should be cached, False otherwise.
  """
  flag = ContextOptions.use_cache(options)
  if flag is not None:
    return flag
  flag = self._cache_policy(key)
  if flag is not None:
    return flag
  flag = ContextOptions.use_cache(self._conn.config)
  if flag is not None:
    return flag
  return True
770,544
Set the memcache policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should be cached. May be None.
def set_memcache_policy(self, func):
  """Set the memcache policy function.

  Args:
    func: A function that accepts a Key instance as argument and returns
      a bool indicating if it should be cached.  May be None (restores
      the default policy) or a bool (constant yes/no policy).
  """
  if func is None:
    func = self.default_memcache_policy
  elif isinstance(func, bool):
    # Promote a constant flag to a policy function returning that flag.
    constant = func
    func = lambda unused_key: constant
  self._memcache_policy = func
770,545
Return whether to use memcache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached in memcache, False otherwise.
def _use_memcache(self, key, options=None):
  """Return whether to use memcache for this key.

  Resolution order: per-call options, then the policy function, then the
  connection's default config; defaults to True if all are None.

  Args:
    key: Key instance.
    options: ContextOptions instance, or None.

  Returns:
    True if the key should be cached in memcache, False otherwise.
  """
  flag = ContextOptions.use_memcache(options)
  if flag is not None:
    return flag
  flag = self._memcache_policy(key)
  if flag is not None:
    return flag
  flag = ContextOptions.use_memcache(self._conn.config)
  if flag is not None:
    return flag
  return True
770,546
Default datastore policy. This defers to _use_datastore on the Model class. Args: key: Key instance. Returns: A bool or None.
def default_datastore_policy(key):
  """Default datastore policy.

  This defers to _use_datastore on the Model class.

  Args:
    key: Key instance.

  Returns:
    A bool or None (None means "no opinion; fall through").
  """
  if key is None:
    return None
  modelclass = model.Model._kind_map.get(key.kind())
  if modelclass is None:
    return None
  policy = getattr(modelclass, '_use_datastore', None)
  if policy is None:
    return None
  if isinstance(policy, bool):
    return policy
  # _use_datastore may also be a callable taking the key.
  return policy(key)
770,547
Set the context datastore policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should use the datastore. May be None.
def set_datastore_policy(self, func):
  """Set the context datastore policy function.

  Args:
    func: A function that accepts a Key instance as argument and returns
      a bool indicating if it should use the datastore.  May be None
      (restores the default policy) or a bool (constant yes/no policy).
  """
  if func is None:
    func = self.default_datastore_policy
  elif isinstance(func, bool):
    # Promote a constant flag to a policy function returning that flag.
    constant = func
    func = lambda unused_key: constant
  self._datastore_policy = func
770,548
Return whether to use the datastore for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the datastore should be used, False otherwise.
def _use_datastore(self, key, options=None):
  """Return whether to use the datastore for this key.

  Resolution order: per-call options, then the policy function, then the
  connection's default config; defaults to True if all are None.

  Args:
    key: Key instance.
    options: ContextOptions instance, or None.

  Returns:
    True if the datastore should be used, False otherwise.
  """
  flag = ContextOptions.use_datastore(options)
  if flag is not None:
    return flag
  flag = self._datastore_policy(key)
  if flag is not None:
    return flag
  flag = ContextOptions.use_datastore(self._conn.config)
  if flag is not None:
    return flag
  return True
770,549
Default memcache timeout policy. This defers to _memcache_timeout on the Model class. Args: key: Key instance. Returns: Memcache timeout to use (integer), or None.
def default_memcache_timeout_policy(key):
  """Default memcache timeout policy.

  This defers to _memcache_timeout on the Model class.

  Args:
    key: Key instance.

  Returns:
    Memcache timeout to use (integer), or None.
  """
  timeout = None
  if key is not None and isinstance(key, model.Key):
    modelclass = model.Model._kind_map.get(key.kind())
    if modelclass is not None:
      policy = getattr(modelclass, '_memcache_timeout', None)
      if policy is not None:
        # NOTE: Python 2 int types (int, long).
        if isinstance(policy, (int, long)):
          timeout = policy
        else:
          # _memcache_timeout may also be a callable taking the key.
          timeout = policy(key)
  return timeout
770,550
Set the policy function for memcache timeout (expiration). Args: func: A function that accepts a key instance as argument and returns an integer indicating the desired memcache timeout. May be None. If the function returns 0 it implies the default timeout.
def set_memcache_timeout_policy(self, func):
  """Set the policy function for memcache timeout (expiration).

  Args:
    func: A function that accepts a key instance as argument and returns
      an integer indicating the desired memcache timeout.  May be None
      (restores the default policy).  If the function returns 0 it
      implies the default timeout.
  """
  if func is None:
    func = self.default_memcache_timeout_policy
  # NOTE: Python 2 int types; a constant timeout is wrapped in a function
  # with the value bound as a default argument.
  elif isinstance(func, (int, long)):
    func = lambda unused_key, flag=func: flag
  self._memcache_timeout_policy = func
770,551
Returns a cached Model instance given the entity key if available. Args: key: Key instance. Returns: A Model instance if the key exists in the cache.
def _load_from_cache_if_available(self, key):
  """Short-circuit with a cached Model instance for key, if usable.

  Args:
    key: Key instance.

  Raises:
    tasklets.Return(entity) when a usable cached value exists (this is
    how tasklets deliver a result); otherwise falls through normally.
    The cached value may be None, meaning "known not to exist".
  """
  missing = object()  # Sentinel: distinguishes "absent" from cached None.
  entity = self._cache.get(key, missing)
  if entity is missing:
    return
  # Only trust the hit if the entity's key wasn't changed after caching.
  # See issue 13.  http://goo.gl/jxjOP
  if entity is None or entity._key == key:
    raise tasklets.Return(entity)
770,553
Return a Model instance given the entity key. It will use the context cache if the cache policy for the given key is enabled. Args: key: Key instance. **ctx_options: Context options. Returns: A Model instance if the key exists in the datastore; None otherwise.
def get(self, key, **ctx_options):
  """Return a Model instance given the entity key.

  It will use the context cache if the cache policy for the given key is
  enabled.  This is a tasklet: the yields below suspend until the
  corresponding async operations complete.

  Args:
    key: Key instance.
    **ctx_options: Context options.

  Returns:
    A Model instance if the key exists in the datastore; None otherwise.
  """
  options = _make_ctx_options(ctx_options)
  use_cache = self._use_cache(key, options)
  if use_cache:
    self._load_from_cache_if_available(key)

  use_datastore = self._use_datastore(key, options)
  # Inside a transaction, skip memcache so stale values can't leak in.
  if (use_datastore and
      isinstance(self._conn, datastore_rpc.TransactionalConnection)):
    use_memcache = False
  else:
    use_memcache = self._use_memcache(key, options)
  ns = key.namespace()
  memcache_deadline = None  # Avoid worries about uninitialized variable.

  if use_memcache:
    mkey = self._memcache_prefix + key.urlsafe()
    memcache_deadline = self._get_memcache_deadline(options)
    mvalue = yield self.memcache_get(mkey, for_cas=use_datastore,
                                     namespace=ns, use_cache=True,
                                     deadline=memcache_deadline)
    # A value may have appeared while yielding.
    if use_cache:
      self._load_from_cache_if_available(key)
    # _LOCKED marks a key another request is currently populating.
    if mvalue not in (_LOCKED, None):
      cls = model.Model._lookup_model(key.kind(),
                                      self._conn.adapter.default_model)
      pb = entity_pb.EntityProto()

      try:
        pb.MergePartialFromString(mvalue)
      except ProtocolBuffer.ProtocolBufferDecodeError:
        logging.warning('Corrupt memcache entry found '
                        'with key %s and namespace %s' % (mkey, ns))
        mvalue = None
      else:
        entity = cls._from_pb(pb)
        # Store the key on the entity since it wasn't written to memcache.
        entity._key = key
        if use_cache:
          # Update in-memory cache.
          self._cache[key] = entity
        raise tasklets.Return(entity)

    if mvalue is None and use_datastore:
      # Lock the memcache slot, then fetch a CAS id so the later write
      # only succeeds if nobody else touched the slot meanwhile.
      yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns,
                              use_cache=True, deadline=memcache_deadline)
      yield self.memcache_gets(mkey, namespace=ns, use_cache=True,
                               deadline=memcache_deadline)

  if not use_datastore:
    # NOTE: Do not cache this miss.  In some scenarios this would
    # prevent an app from working properly.
    raise tasklets.Return(None)

  if use_cache:
    entity = yield self._get_batcher.add_once(key, options)
  else:
    entity = yield self._get_batcher.add(key, options)

  if entity is not None:
    if use_memcache and mvalue != _LOCKED:
      # Don't serialize the key since it's already the memcache key.
      pbs = entity._to_pb(set_key=False).SerializePartialToString()
      # Don't attempt to write to memcache if too big.  Note that we
      # use LBYL ("look before you leap") because a multi-value
      # memcache operation would fail for all entities rather than
      # for just the one that's too big.  (Also, the AutoBatcher
      # class doesn't pass back exceptions very well.)
      if len(pbs) <= memcache.MAX_VALUE_SIZE:
        timeout = self._get_memcache_timeout(key, options)
        # Don't use fire-and-forget -- for users who forget
        # @ndb.toplevel, it's too painful to diagnose why their simple
        # code using a single synchronous call doesn't seem to use
        # memcache.  See issue 105.  http://goo.gl/JQZxp
        yield self.memcache_cas(mkey, pbs, time=timeout, namespace=ns,
                                deadline=memcache_deadline)

  if use_cache:
    # Cache hit or miss.  NOTE: In this case it is okay to cache a
    # miss; the datastore is the ultimate authority.
    self._cache[key] = entity
  raise tasklets.Return(entity)
770,554
Marks a task as done. Args: task_id: The integer id of the task to update. Raises: ValueError: if the requested task doesn't exist.
def mark_done(task_id):
  """Marks a task as done.

  Args:
    task_id: The integer id of the task to update.

  Raises:
    ValueError: if the requested task doesn't exist.
  """
  task = Task.get_by_id(task_id)
  if task is None:
    raise ValueError('Task with id %d does not exist' % task_id)
  task.done = True
  task.put()  # Persist the updated entity to the datastore.
770,582
Converts a list of tasks to a list of string representations. Args: tasks: A list of the tasks to convert. Returns: A list of string formatted tasks.
def format_tasks(tasks):
  """Converts a list of tasks to a list of string representations.

  Args:
    tasks: A list of the tasks to convert.

  Returns:
    A list of string formatted tasks.
  """
  lines = []
  for task in tasks:
    status = 'done' if task.done else 'created %s' % task.created
    lines.append('%d : %s (%s)' % (task.key.id(), task.description, status))
  return lines
770,583
Accepts a string command and performs an action. Args: command: the command to run as a string.
def handle_command(command):
  """Accepts a string command and performs an action.

  Recognized commands: 'new <description>', 'done <id>', 'list',
  'delete <id>'.  Anything else prints usage help.

  Args:
    command: the command to run as a string.
  """
  try:
    # Split into the command word and (at most) one argument string.
    cmds = command.split(None, 1)
    cmd = cmds[0]
    if cmd == 'new':
      add_task(get_arg(cmds))
    elif cmd == 'done':
      mark_done(int(get_arg(cmds)))
    elif cmd == 'list':
      for task in format_tasks(list_tasks()):
        print task
    elif cmd == 'delete':
      delete_task(int(get_arg(cmds)))
    else:
      print_usage()
  except Exception, e:  # pylint: disable=broad-except
    # Deliberate best-effort CLI loop: report the error and show usage
    # rather than crashing the interactive session.
    print e
    print_usage()
770,584
Parse a BlobInfo record from file upload field_storage. Args: field_storage: cgi.FieldStorage that represents uploaded blob. Returns: BlobInfo record as parsed from the field-storage instance. None if there was no field_storage. Raises: BlobInfoParseError when provided field_storage does not contain enough information to construct a BlobInfo object.
def parse_blob_info(field_storage):
  """Parse a BlobInfo record from file upload field_storage.

  Args:
    field_storage: cgi.FieldStorage that represents uploaded blob.

  Returns:
    BlobInfo record as parsed from the field-storage instance.
    None if there was no field_storage.

  Raises:
    BlobInfoParseError when provided field_storage does not contain enough
    information to construct a BlobInfo object.
  """
  if field_storage is None:
    return None

  field_name = field_storage.name

  def get_value(dct, name):
    # Helper: fetch a required value, raising a parse error if absent.
    value = dct.get(name, None)
    if value is None:
      raise BlobInfoParseError(
          'Field %s has no %s.' % (field_name, name))
    return value

  filename = get_value(field_storage.disposition_options, 'filename')
  blob_key_str = get_value(field_storage.type_options, 'blob-key')
  # NOTE(review): blob_key is never used below (BlobInfo is built from
  # blob_key_str directly).  Possibly kept for its validation side
  # effect -- confirm before removing.
  blob_key = BlobKey(blob_key_str)

  # The upload body is itself a MIME message carrying blob metadata.
  upload_content = email.message_from_file(field_storage.file)
  content_type = get_value(upload_content, 'content-type')
  size = get_value(upload_content, 'content-length')
  creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
  md5_hash_encoded = get_value(upload_content, 'content-md5')
  md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)

  try:
    size = int(size)
  except (TypeError, ValueError):
    raise BlobInfoParseError(
        '%s is not a valid value for %s size.' % (size, field_name))

  try:
    creation = blobstore._parse_creation(creation_string, field_name)
  except blobstore._CreationFormatError, err:
    raise BlobInfoParseError(str(err))

  return BlobInfo(id=blob_key_str,
                  content_type=content_type,
                  creation=creation,
                  filename=filename,
                  size=size,
                  md5_hash=md5_hash,
                  )
770,590