repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
richardliaw/track
track/project.py
Project.results
def results(self, trial_ids):
    """Aggregate the logged metrics of several trials into one dataframe.

    Accepts a sequence of trial ids and returns a pandas dataframe
    with the schema ``trial_id, iteration?, *metric_schema_union``
    where ``iteration`` is an optional column that specifies the
    iteration when a user logged a metric, if the user supplied one.
    The iteration column is added if any metric was logged with an
    iteration. Then, every metric name that was ever logged is a
    column in the metric_schema_union.

    :param trial_ids: Sequence of trial id strings to load results for.
    :return: A single ``pandas.DataFrame`` concatenating all trials.
    :raises FileNotFoundError: If a trial's result file is missing.
    """
    metadata_folder = os.path.join(self.log_dir, constants.METADATA_FOLDER)
    dfs = []
    # TODO: various file-creation corner cases like the result file not
    # always existing if stuff is not logged and etc should be ironed out
    # (would probably be easier if we had a centralized Sync class which
    # relied on some formal remote store semantics).
    for trial_id in trial_ids:
        # TODO constants should just contain the recipes for filename
        # construction instead of this multi-file implicit constraint
        result_file = os.path.join(
            metadata_folder, trial_id + "_" + constants.RESULT_SUFFIX)
        # An assert would be stripped under `python -O`; raise explicitly
        # so a missing result file is always reported.
        if not os.path.isfile(result_file):
            raise FileNotFoundError(
                "missing result file for trial {}: {}".format(
                    trial_id, result_file))
        # Result files are JSON-lines, one logged record per line.
        dfs.append(pd.read_json(result_file, typ='frame', lines=True))
    df = pd.concat(dfs, axis=0, ignore_index=True, sort=False)
    return df
python
def results(self, trial_ids): """ Accepts a sequence of trial ids and returns a pandas dataframe with the schema trial_id, iteration?, *metric_schema_union where iteration is an optional column that specifies the iteration when a user logged a metric, if the user supplied one. The iteration column is added if any metric was logged with an iteration. Then, every metric name that was ever logged is a column in the metric_schema_union. """ metadata_folder = os.path.join(self.log_dir, constants.METADATA_FOLDER) dfs = [] # TODO: various file-creation corner cases like the result file not # always existing if stuff is not logged and etc should be ironed out # (would probably be easier if we had a centralized Sync class which # relied on some formal remote store semantics). for trial_id in trial_ids: # TODO constants should just contain the recipes for filename # construction instead of this multi-file implicit constraint result_file = os.path.join( metadata_folder, trial_id + "_" + constants.RESULT_SUFFIX) assert os.path.isfile(result_file), result_file dfs.append(pd.read_json(result_file, typ='frame', lines=True)) df = pd.concat(dfs, axis=0, ignore_index=True, sort=False) return df
[ "def", "results", "(", "self", ",", "trial_ids", ")", ":", "metadata_folder", "=", "os", ".", "path", ".", "join", "(", "self", ".", "log_dir", ",", "constants", ".", "METADATA_FOLDER", ")", "dfs", "=", "[", "]", "# TODO: various file-creation corner cases lik...
Accepts a sequence of trial ids and returns a pandas dataframe with the schema trial_id, iteration?, *metric_schema_union where iteration is an optional column that specifies the iteration when a user logged a metric, if the user supplied one. The iteration column is added if any metric was logged with an iteration. Then, every metric name that was ever logged is a column in the metric_schema_union.
[ "Accepts", "a", "sequence", "of", "trial", "ids", "and", "returns", "a", "pandas", "dataframe", "with", "the", "schema" ]
train
https://github.com/richardliaw/track/blob/7ac42ea34e5c1d7bb92fd813e938835a06a63fc7/track/project.py#L38-L65
richardliaw/track
track/project.py
Project.fetch_artifact
def fetch_artifact(self, trial_id, prefix):
    """Ensure a trial's artifacts under *prefix* are available locally.

    Verifies that all children of the artifact prefix path are present
    locally, fetching them from the remote store if not.

    :param trial_id: Identifier of the trial whose artifacts are wanted.
    :param prefix: Artifact path prefix below the trial directory.
    :return: Local path to the artifacts, which is always
        ``{log_dir}/{trial_id}/{prefix}``.
    """
    # TODO: general windows concern: local prefix will be in
    # backslashes but remote dirs will be expecting /
    # TODO: having s3 logic split between project and sync.py
    # worries me
    local_path = os.path.join(self.log_dir, trial_id, prefix)
    if not self.upload_dir:
        # No remote store configured; everything must already be local.
        return local_path
    remote_path = '/'.join((self.upload_dir, trial_id, prefix))
    _remote_to_local_sync(remote_path, local_path)
    return local_path
python
def fetch_artifact(self, trial_id, prefix): """ Verifies that all children of the artifact prefix path are available locally. Fetches them if not. Returns the local path to the given trial's artifacts at the specified prefix, which is always just {log_dir}/{trial_id}/{prefix} """ # TODO: general windows concern: local prefix will be in # backslashes but remote dirs will be expecting / # TODO: having s3 logic split between project and sync.py # worries me local = os.path.join(self.log_dir, trial_id, prefix) if self.upload_dir: remote = '/'.join([self.upload_dir, trial_id, prefix]) _remote_to_local_sync(remote, local) return local
[ "def", "fetch_artifact", "(", "self", ",", "trial_id", ",", "prefix", ")", ":", "# TODO: general windows concern: local prefix will be in", "# backslashes but remote dirs will be expecting /", "# TODO: having s3 logic split between project and sync.py", "# worries me", "local", "=", ...
Verifies that all children of the artifact prefix path are available locally. Fetches them if not. Returns the local path to the given trial's artifacts at the specified prefix, which is always just {log_dir}/{trial_id}/{prefix}
[ "Verifies", "that", "all", "children", "of", "the", "artifact", "prefix", "path", "are", "available", "locally", ".", "Fetches", "them", "if", "not", "." ]
train
https://github.com/richardliaw/track/blob/7ac42ea34e5c1d7bb92fd813e938835a06a63fc7/track/project.py#L68-L86
p3trus/slave
slave/oxford/itc503.py
ITC503.scan_temperature
def scan_temperature(self, measure, temperature, rate, delay=1):
    """Performs a temperature scan.

    Measures until the target temperature is reached.

    :param measure: A callable called repeatedly until stability at
        target temperature is reached.
    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each call to measure in seconds.
    """
    # Start the sweep from the current control temperature.
    setpoint = self.control_temperature
    self.target_temperature = setpoint
    # A positive rate sweeps up, a negative rate sweeps down.
    signed_rate = abs(rate) if temperature - setpoint > 0 else -abs(rate)
    previous_time = time.time()
    time.sleep(delay)
    while True:
        measure()
        # Advance the setpoint by the kelvin covered since the last
        # update (rate is given per minute, elapsed time in seconds).
        now = time.time()
        elapsed = now - previous_time
        previous_time = now
        step = elapsed * signed_rate / 60.
        if abs(temperature - setpoint) < abs(step):
            # The next step would overshoot; snap to the target and stop.
            self.target_temperature = temperature
            break
        setpoint += step
        self.target_temperature = setpoint
        time.sleep(delay)
python
def scan_temperature(self, measure, temperature, rate, delay=1): """Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds. """ # set target temperature to current control temperature self.target_temperature = Tset = self.control_temperature # we use a positive sign for the sweep rate if we sweep up and negative # if we sweep down. rate = abs(rate) if temperature - Tset > 0 else -abs(rate) t_last = time.time() time.sleep(delay) while True: measure() # Update setpoint t_now = time.time() dt = t_now - t_last dT = dt * rate / 60. t_last = t_now if abs(temperature - Tset) < abs(dT): self.target_temperature = temperature break else: self.target_temperature = Tset = Tset + dT time.sleep(delay)
[ "def", "scan_temperature", "(", "self", ",", "measure", ",", "temperature", ",", "rate", ",", "delay", "=", "1", ")", ":", "# set target temperature to current control temperature", "self", ".", "target_temperature", "=", "Tset", "=", "self", ".", "control_temperatu...
Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds.
[ "Performs", "a", "temperature", "scan", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/oxford/itc503.py#L165-L198
p3trus/slave
slave/oxford/itc503.py
ITC503.scan_temperature_old
def scan_temperature_old(self, measure, temperature, rate, delay=1):
    """Performs a temperature scan.

    Measures until the target temperature is reached.

    :param measure: A callable called repeatedly until stability at
        target temperature is reached.
    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each call to measure in seconds.
    """
    self.activity = 'hold'
    # Drop any previously programmed sweep.
    self.sweep_table.clear()
    # Sweep from the current temperature; the duration follows from the
    # requested rate.
    start_temperature = self.control_temperature
    sweep_duration = abs((temperature - start_temperature) / rate)
    self.sweep_table[0] = temperature, sweep_duration, 0.
    self.sweep_table[-1] = temperature, 0., 0.
    self.activity = 'sweep'
    while self.activity == 'sweep':
        measure()
        time.sleep(delay)
python
def scan_temperature_old(self, measure, temperature, rate, delay=1): """Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds. """ self.activity = 'hold' # Clear old sweep table self.sweep_table.clear() # Use current temperature as target temperature # and calculate sweep time. current_temperature = self.control_temperature sweep_time = abs((temperature - current_temperature) / rate) self.sweep_table[0] = temperature, sweep_time, 0. self.sweep_table[-1] = temperature, 0., 0. self.activity = 'sweep' while self.activity == 'sweep': measure() time.sleep(delay)
[ "def", "scan_temperature_old", "(", "self", ",", "measure", ",", "temperature", ",", "rate", ",", "delay", "=", "1", ")", ":", "self", ".", "activity", "=", "'hold'", "# Clear old sweep table", "self", ".", "sweep_table", ".", "clear", "(", ")", "# Use curre...
Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds.
[ "Performs", "a", "temperature", "scan", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/oxford/itc503.py#L200-L227
p3trus/slave
slave/oxford/itc503.py
ITC503.set_temperature
def set_temperature(self, temperature, rate, delay=1):
    """Sweeps the setpoint to the target temperature, blocking until it
    is reached.

    Convenience wrapper around :meth:`scan_temperature` that performs
    the sweep without taking any measurements.

    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between internal setpoint updates in
        seconds.
    """
    # Delegate to scan_temperature with a no-op measure callable.
    self.scan_temperature(lambda: None, temperature, rate, delay)
python
def set_temperature(self, temperature, rate, delay=1): """Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds. """ self.scan_temperature(lambda: None, temperature, rate, delay)
[ "def", "set_temperature", "(", "self", ",", "temperature", ",", "rate", ",", "delay", "=", "1", ")", ":", "self", ".", "scan_temperature", "(", "lambda", ":", "None", ",", "temperature", ",", "rate", ",", "delay", ")" ]
Performs a temperature scan. Measures until the target temperature is reached. :param measure: A callable called repeatedly until stability at target temperature is reached. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param delay: The time delay between each call to measure in seconds.
[ "Performs", "a", "temperature", "scan", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/oxford/itc503.py#L229-L241
p3trus/slave
slave/oxford/itc503.py
ITC503.set_temperature_old
def set_temperature_old(self, temperature, rate, wait_for_stability=True, delay=1):
    """Sets the temperature.

    .. note:: For complex sweep sequences, checkout
        :attr:`ITC503.sweep_table`.

    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param wait_for_stability: If wait_for_stability is `True`, the
        function call blocks until the target temperature is reached
        and stable.
    :param delay: The delay specifies the frequency how often the
        status is checked.
    """
    self.activity = 'hold'
    # Discard any previously programmed sweep.
    self.sweep_table.clear()
    # Sweep from the current temperature; duration follows from the rate.
    start = self.control_temperature
    sweep_duration = abs((temperature - start) / rate)
    self.sweep_table[0] = temperature, sweep_duration, 0.
    self.sweep_table[-1] = temperature, 0., 0.
    self.activity = 'sweep'
    if not wait_for_stability:
        return
    # Poll the device until it reports the sweep has finished.
    while self.activity == 'sweep':
        time.sleep(delay)
python
def set_temperature_old(self, temperature, rate, wait_for_stability=True, delay=1): """Sets the temperature. .. note:: For complex sweep sequences, checkout :attr:`ITC503.sweep_table`. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param wait_for_stability: If wait_for_stability is `True`, the function call blocks until the target temperature is reached and stable. :param delay: The delay specifies the frequency how often the status is checked. """ self.activity = 'hold' # Clear old sweep table self.sweep_table.clear() # Use current temperature as target temperature # and calculate sweep time. current_temperature = self.control_temperature sweep_time = abs((temperature - current_temperature) / rate) self.sweep_table[0] = temperature, sweep_time, 0. self.sweep_table[-1] = temperature, 0., 0. self.activity = 'sweep' if wait_for_stability: while self.activity == 'sweep': time.sleep(delay)
[ "def", "set_temperature_old", "(", "self", ",", "temperature", ",", "rate", ",", "wait_for_stability", "=", "True", ",", "delay", "=", "1", ")", ":", "self", ".", "activity", "=", "'hold'", "# Clear old sweep table", "self", ".", "sweep_table", ".", "clear", ...
Sets the temperature. .. note:: For complex sweep sequences, checkout :attr:`ITC503.sweep_table`. :param temperature: The target temperature in kelvin. :param rate: The sweep rate in kelvin per minute. :param wait_for_stability: If wait_for_stability is `True`, the function call blocks until the target temperature is reached and stable. :param delay: The delay specifies the frequency how often the status is checked.
[ "Sets", "the", "temperature", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/oxford/itc503.py#L243-L273
pudo/jsongraph
jsongraph/metadata.py
MetaData._load
def _load(self):
    """Load provenance info from the main store."""
    identifier = self.context.identifier
    graph = self.context.parent.graph.get_context(identifier)
    # Keep only predicates in the META namespace; strip the namespace
    # prefix to recover the plain attribute name.
    return {
        predicate[len(META):]: obj.toPython()
        for (_, predicate, obj) in graph.triples((identifier, None, None))
        if predicate.startswith(META)
    }
python
def _load(self): """ Load provenance info from the main store. """ graph = self.context.parent.graph.get_context(self.context.identifier) data = {} for (_, p, o) in graph.triples((self.context.identifier, None, None)): if not p.startswith(META): continue name = p[len(META):] data[name] = o.toPython() return data
[ "def", "_load", "(", "self", ")", ":", "graph", "=", "self", ".", "context", ".", "parent", ".", "graph", ".", "get_context", "(", "self", ".", "context", ".", "identifier", ")", "data", "=", "{", "}", "for", "(", "_", ",", "p", ",", "o", ")", ...
Load provenance info from the main store.
[ "Load", "provenance", "info", "from", "the", "main", "store", "." ]
train
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/metadata.py#L22-L31
pudo/jsongraph
jsongraph/metadata.py
MetaData.generate
def generate(self):
    """Add provenance info to the context graph.

    Ensures the context is typed as ``META.Provenance`` and writes one
    ``META[name]`` triple per entry of ``self.data``, replacing any
    previously recorded value.
    """
    provenance_type = (self.context.identifier, RDF.type, META.Provenance)
    if provenance_type not in self.context.graph:
        self.context.graph.add(provenance_type)
    for name, value in self.data.items():
        # rdflib's Graph.remove accepts a wildcard pattern and is a
        # no-op when nothing matches, so the previous membership check
        # before removing was redundant (and cost an extra lookup).
        pattern = (self.context.identifier, META[name], None)
        self.context.graph.remove(pattern)
        self.context.graph.add((pattern[0], META[name], Literal(value)))
python
def generate(self): """ Add provenance info to the context graph. """ t = (self.context.identifier, RDF.type, META.Provenance) if t not in self.context.graph: self.context.graph.add(t) for name, value in self.data.items(): pat = (self.context.identifier, META[name], None) if pat in self.context.graph: self.context.graph.remove(pat) self.context.graph.add((pat[0], META[name], Literal(value)))
[ "def", "generate", "(", "self", ")", ":", "t", "=", "(", "self", ".", "context", ".", "identifier", ",", "RDF", ".", "type", ",", "META", ".", "Provenance", ")", "if", "t", "not", "in", "self", ".", "context", ".", "graph", ":", "self", ".", "con...
Add provenance info to the context graph.
[ "Add", "provenance", "info", "to", "the", "context", "graph", "." ]
train
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/metadata.py#L33-L42
inveniosoftware/invenio-openaire
invenio_openaire/resolvers/funders.py
jsonresolver_loader
def jsonresolver_loader(url_map):
    """Jsonresolver hook for funders resolving."""
    def endpoint(doi_code):
        # Funder PIDs are FundRef DOIs under the 10.13039 prefix.
        pid_value = "10.13039/{0}".format(doi_code)
        resolver = Resolver(pid_type='frdoi', object_type='rec',
                            getter=Record.get_record)
        _, record = resolver.resolve(pid_value)
        return record

    pattern = '/10.13039/<doi_code>'
    # Register the resolver for both DOI hosts.
    for host in ('doi.org', 'dx.doi.org'):
        url_map.add(Rule(pattern, endpoint=endpoint, host=host))
python
def jsonresolver_loader(url_map): """Jsonresolver hook for funders resolving.""" def endpoint(doi_code): pid_value = "10.13039/{0}".format(doi_code) _, record = Resolver(pid_type='frdoi', object_type='rec', getter=Record.get_record).resolve(pid_value) return record pattern = '/10.13039/<doi_code>' url_map.add(Rule(pattern, endpoint=endpoint, host='doi.org')) url_map.add(Rule(pattern, endpoint=endpoint, host='dx.doi.org'))
[ "def", "jsonresolver_loader", "(", "url_map", ")", ":", "def", "endpoint", "(", "doi_code", ")", ":", "pid_value", "=", "\"10.13039/{0}\"", ".", "format", "(", "doi_code", ")", "_", ",", "record", "=", "Resolver", "(", "pid_type", "=", "'frdoi'", ",", "obj...
Jsonresolver hook for funders resolving.
[ "Jsonresolver", "hook", "for", "funders", "resolving", "." ]
train
https://github.com/inveniosoftware/invenio-openaire/blob/71860effff6abe7f658d3a11894e064202ef1c36/invenio_openaire/resolvers/funders.py#L35-L45
p3trus/slave
slave/srs/sr830.py
SR830.snap
def snap(self, *args):
    """Records up to 6 parameters at a time.

    :param args: Specifies the values to record. Valid ones are 'X',
        'Y', 'R', 'Theta', 'AuxIn1', 'AuxIn2', 'AuxIn3', 'AuxIn4',
        'Ref', 'CH1' and 'CH2'. If none are given 'X' and 'Y' are
        used.  (The docstring previously advertised lowercase
        'theta', which raised a KeyError; the accepted spelling is
        'Theta'.)
    :raises ValueError: If more than six parameters are requested.
    """
    # TODO: Do not use transport directly.
    # Mapping from parameter name to the SNAP? numeric code.
    params = {'X': 1, 'Y': 2, 'R': 3, 'Theta': 4, 'AuxIn1': 5,
              'AuxIn2': 6, 'AuxIn3': 7, 'AuxIn4': 8, 'Ref': 9,
              'CH1': 10, 'CH2': 11}
    if not args:
        args = ['X', 'Y']
    if len(args) > 6:
        raise ValueError('Too many parameters (max: 6).')
    cmd = 'SNAP? ' + ','.join(str(params[x]) for x in args)
    result = self.transport.ask(cmd)
    return map(float, result.split(','))
python
def snap(self, *args): """Records up to 6 parameters at a time. :param args: Specifies the values to record. Valid ones are 'X', 'Y', 'R', 'theta', 'AuxIn1', 'AuxIn2', 'AuxIn3', 'AuxIn4', 'Ref', 'CH1' and 'CH2'. If none are given 'X' and 'Y' are used. """ # TODO: Do not use transport directly. params = {'X': 1, 'Y': 2, 'R': 3, 'Theta': 4, 'AuxIn1': 5, 'AuxIn2': 6, 'AuxIn3': 7, 'AuxIn4': 8, 'Ref': 9, 'CH1': 10, 'CH2': 11} if not args: args = ['X', 'Y'] if len(args) > 6: raise ValueError('Too many parameters (max: 6).') cmd = 'SNAP? ' + ','.join(map(lambda x: str(params[x]), args)) result = self.transport.ask(cmd) return map(float, result.split(','))
[ "def", "snap", "(", "self", ",", "*", "args", ")", ":", "# TODO: Do not use transport directly.", "params", "=", "{", "'X'", ":", "1", ",", "'Y'", ":", "2", ",", "'R'", ":", "3", ",", "'Theta'", ":", "4", ",", "'AuxIn1'", ":", "5", ",", "'AuxIn2'", ...
Records up to 6 parameters at a time. :param args: Specifies the values to record. Valid ones are 'X', 'Y', 'R', 'theta', 'AuxIn1', 'AuxIn2', 'AuxIn3', 'AuxIn4', 'Ref', 'CH1' and 'CH2'. If none are given 'X' and 'Y' are used.
[ "Records", "up", "to", "6", "parameters", "at", "a", "time", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/srs/sr830.py#L461-L478
p3trus/slave
slave/srs/sr830.py
SR830.trace
def trace(self, buffer, start, length=1):
    """Reads the points stored in the channel buffer.

    :param buffer: Selects the channel buffer (either 1 or 2).
    :param start: Selects the bin where the reading starts.
    :param length: The number of bins to read.

    .. todo:: Use binary command TRCB to speed up data transmission.
    """
    # TODO: Do not use transport directly.
    query = 'TRCA? {0}, {1}, {2}'.format(buffer, start, length)
    raw = self.transport.ask(query)
    # The reply looks like "1.0e-004,1.2e-004," -- drop the trailing
    # comma before splitting into individual readings.
    values = raw.strip(',').split(',')
    return (float(value) for value in values)
python
def trace(self, buffer, start, length=1): """Reads the points stored in the channel buffer. :param buffer: Selects the channel buffer (either 1 or 2). :param start: Selects the bin where the reading starts. :param length: The number of bins to read. .. todo:: Use binary command TRCB to speed up data transmission. """ # TODO: Do not use transport directly. query = 'TRCA? {0}, {1}, {2}'.format(buffer, start, length) result = self.transport.ask(query) # Result format: "1.0e-004,1.2e-004,". Strip trailing comma then split. return (float(f) for f in result.strip(',').split(','))
[ "def", "trace", "(", "self", ",", "buffer", ",", "start", ",", "length", "=", "1", ")", ":", "# TODO: Do not use transport directly.", "query", "=", "'TRCA? {0}, {1}, {2}'", ".", "format", "(", "buffer", ",", "start", ",", "length", ")", "result", "=", "self...
Reads the points stored in the channel buffer. :param buffer: Selects the channel buffer (either 1 or 2). :param start: Selects the bin where the reading starts. :param length: The number of bins to read. .. todo:: Use binary command TRCB to speed up data transmission.
[ "Reads", "the", "points", "stored", "in", "the", "channel", "buffer", "." ]
train
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/srs/sr830.py#L484-L498
twisted/mantissa
xmantissa/offering.py
isAppStore
def isAppStore(s):
    """
    Return whether the given store is an application store or not.

    @param s: A Store.
    """
    # The top-level site store has no parent and is never an app store.
    if s.parent is None:
        return False
    substore = s.parent.getItemByID(s.idInParent)
    installed = s.parent.query(
        InstalledOffering,
        InstalledOffering.application == substore)
    return installed.count() > 0
python
def isAppStore(s): """ Return whether the given store is an application store or not. @param s: A Store. """ if s.parent is None: return False substore = s.parent.getItemByID(s.idInParent) return s.parent.query(InstalledOffering, InstalledOffering.application == substore ).count() > 0
[ "def", "isAppStore", "(", "s", ")", ":", "if", "s", ".", "parent", "is", "None", ":", "return", "False", "substore", "=", "s", ".", "parent", ".", "getItemByID", "(", "s", ".", "idInParent", ")", "return", "s", ".", "parent", ".", "query", "(", "In...
Return whether the given store is an application store or not. @param s: A Store.
[ "Return", "whether", "the", "given", "store", "is", "an", "application", "store", "or", "not", "." ]
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/offering.py#L192-L202
twisted/mantissa
xmantissa/offering.py
OfferingAdapter.getInstalledOfferings
def getInstalledOfferings(self):
    """
    Return a mapping from the name of each L{InstalledOffering} in
    C{self._siteStore} to the corresponding L{IOffering} plugins.
    """
    offerings = {}
    for installation in self._siteStore.query(InstalledOffering):
        plugin = installation.getOffering()
        # Skip installations whose offering plugin is no longer available.
        if plugin is None:
            continue
        offerings[plugin.name] = plugin
    return offerings
python
def getInstalledOfferings(self): """ Return a mapping from the name of each L{InstalledOffering} in C{self._siteStore} to the corresponding L{IOffering} plugins. """ d = {} installed = self._siteStore.query(InstalledOffering) for installation in installed: offering = installation.getOffering() if offering is not None: d[offering.name] = offering return d
[ "def", "getInstalledOfferings", "(", "self", ")", ":", "d", "=", "{", "}", "installed", "=", "self", ".", "_siteStore", ".", "query", "(", "InstalledOffering", ")", "for", "installation", "in", "installed", ":", "offering", "=", "installation", ".", "getOffe...
Return a mapping from the name of each L{InstalledOffering} in C{self._siteStore} to the corresponding L{IOffering} plugins.
[ "Return", "a", "mapping", "from", "the", "name", "of", "each", "L", "{", "InstalledOffering", "}", "in", "C", "{", "self", ".", "_siteStore", "}", "to", "the", "corresponding", "L", "{", "IOffering", "}", "plugins", "." ]
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/offering.py#L107-L118
twisted/mantissa
xmantissa/offering.py
OfferingAdapter.installOffering
def installOffering(self, offering):
    """
    Install the given offering::

      - Create and install the powerups in its I{siteRequirements} list.

      - Create an application L{Store} and a L{LoginAccount} referring to
        it. Install the I{appPowerups} on the application store.

      - Create an L{InstalledOffering}.

    Perform all of these tasks in a transaction managed within the scope of
    this call (that means you should not call this function inside a
    transaction, or you should not handle any exceptions it raises inside an
    externally managed transaction).

    @type offering: L{IOffering}
    @param offering: The offering to install.

    @return: The C{InstalledOffering} item created.
    """
    # Refuse to install the same offering twice.
    for off in self._siteStore.query(
            InstalledOffering,
            InstalledOffering.offeringName == offering.name):
        raise OfferingAlreadyInstalled(off)

    def siteSetup():
        # Satisfy every site-level requirement: if no provider of the
        # required interface exists yet, create the required powerup.
        for (requiredInterface, requiredPowerup) in offering.siteRequirements:
            if requiredInterface is not None:
                nn = requiredInterface(self._siteStore, None)
                if nn is not None:
                    # Already provided; nothing to install.
                    continue
            if requiredPowerup is None:
                raise NotImplementedError(
                    'Interface %r required by %r but not provided by %r' %
                    (requiredInterface, offering, self._siteStore))
            self._siteStore.findOrCreate(
                requiredPowerup, lambda p: installOn(p, self._siteStore))
        # Create the application store and a login account pointing at it.
        ls = self._siteStore.findOrCreate(userbase.LoginSystem)
        substoreItem = substore.SubStore.createNew(
            self._siteStore, ('app', offering.name + '.axiom'))
        ls.addAccount(offering.name, None, None, internal=True,
                      avatars=substoreItem)
        # Expose the application at /<offering name>.
        from xmantissa.publicweb import PublicWeb
        PublicWeb(store=self._siteStore, application=substoreItem,
                  prefixURL=offering.name)
        ss = substoreItem.open()

        def appSetup():
            # Install the offering's application powerups in the app store.
            for pup in offering.appPowerups:
                installOn(pup(store=ss), ss)
        ss.transact(appSetup)
        # Woops, we need atomic cross-store transactions.
        io = InstalledOffering(
            store=self._siteStore, offeringName=offering.name,
            application=substoreItem)
        # Some new themes may be available now. Clear the theme cache
        # so they can show up.
        # XXX This is pretty terrible -- there
        # really should be a scheme by which ThemeCache instances can
        # be non-global. Fix this at the earliest opportunity.
        from xmantissa import webtheme
        webtheme.theThemeCache.emptyCache()
        return io
    return self._siteStore.transact(siteSetup)
python
def installOffering(self, offering): """ Install the given offering:: - Create and install the powerups in its I{siteRequirements} list. - Create an application L{Store} and a L{LoginAccount} referring to it. Install the I{appPowerups} on the application store. - Create an L{InstalledOffering. Perform all of these tasks in a transaction managed within the scope of this call (that means you should not call this function inside a transaction, or you should not handle any exceptions it raises inside an externally managed transaction). @type offering: L{IOffering} @param offering: The offering to install. @return: The C{InstalledOffering} item created. """ for off in self._siteStore.query( InstalledOffering, InstalledOffering.offeringName == offering.name): raise OfferingAlreadyInstalled(off) def siteSetup(): for (requiredInterface, requiredPowerup) in offering.siteRequirements: if requiredInterface is not None: nn = requiredInterface(self._siteStore, None) if nn is not None: continue if requiredPowerup is None: raise NotImplementedError( 'Interface %r required by %r but not provided by %r' % (requiredInterface, offering, self._siteStore)) self._siteStore.findOrCreate( requiredPowerup, lambda p: installOn(p, self._siteStore)) ls = self._siteStore.findOrCreate(userbase.LoginSystem) substoreItem = substore.SubStore.createNew( self._siteStore, ('app', offering.name + '.axiom')) ls.addAccount(offering.name, None, None, internal=True, avatars=substoreItem) from xmantissa.publicweb import PublicWeb PublicWeb(store=self._siteStore, application=substoreItem, prefixURL=offering.name) ss = substoreItem.open() def appSetup(): for pup in offering.appPowerups: installOn(pup(store=ss), ss) ss.transact(appSetup) # Woops, we need atomic cross-store transactions. io = InstalledOffering( store=self._siteStore, offeringName=offering.name, application=substoreItem) #Some new themes may be available now. Clear the theme cache #so they can show up. 
#XXX This is pretty terrible -- there #really should be a scheme by which ThemeCache instances can #be non-global. Fix this at the earliest opportunity. from xmantissa import webtheme webtheme.theThemeCache.emptyCache() return io return self._siteStore.transact(siteSetup)
[ "def", "installOffering", "(", "self", ",", "offering", ")", ":", "for", "off", "in", "self", ".", "_siteStore", ".", "query", "(", "InstalledOffering", ",", "InstalledOffering", ".", "offeringName", "==", "offering", ".", "name", ")", ":", "raise", "Offerin...
Install the given offering:: - Create and install the powerups in its I{siteRequirements} list. - Create an application L{Store} and a L{LoginAccount} referring to it. Install the I{appPowerups} on the application store. - Create an L{InstalledOffering. Perform all of these tasks in a transaction managed within the scope of this call (that means you should not call this function inside a transaction, or you should not handle any exceptions it raises inside an externally managed transaction). @type offering: L{IOffering} @param offering: The offering to install. @return: The C{InstalledOffering} item created.
[ "Install", "the", "given", "offering", "::" ]
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/offering.py#L121-L186
twisted/mantissa
xmantissa/offering.py
OfferingConfiguration.installOffering
def installOffering(self, offering, configuration): """ Create an app store for an L{Offering} and install its dependencies. Also create an L{InstalledOffering} in the site store, and return it. """ s = self.store.parent self.installedOfferingCount += 1 return installOffering(s, offering, configuration)
python
def installOffering(self, offering, configuration): """ Create an app store for an L{Offering} and install its dependencies. Also create an L{InstalledOffering} in the site store, and return it. """ s = self.store.parent self.installedOfferingCount += 1 return installOffering(s, offering, configuration)
[ "def", "installOffering", "(", "self", ",", "offering", ",", "configuration", ")", ":", "s", "=", "self", ".", "store", ".", "parent", "self", ".", "installedOfferingCount", "+=", "1", "return", "installOffering", "(", "s", ",", "offering", ",", "configurati...
Create an app store for an L{Offering} and install its dependencies. Also create an L{InstalledOffering} in the site store, and return it.
[ "Create", "an", "app", "store", "for", "an", "L", "{", "Offering", "}", "and", "install", "its", "dependencies", ".", "Also", "create", "an", "L", "{", "InstalledOffering", "}", "in", "the", "site", "store", "and", "return", "it", "." ]
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/offering.py#L248-L256
rande/python-simple-ioc
ioc/extra/tornado/handler.py
RouterHandler.send_file
def send_file(self, file): """ Send a file to the client, it is a convenient method to avoid duplicated code """ if self.logger: self.logger.debug("[ioc.extra.tornado.RouterHandler] send file %s" % file) self.send_file_header(file) fp = open(file, 'rb') self.write(fp.read()) fp.close()
python
def send_file(self, file): """ Send a file to the client, it is a convenient method to avoid duplicated code """ if self.logger: self.logger.debug("[ioc.extra.tornado.RouterHandler] send file %s" % file) self.send_file_header(file) fp = open(file, 'rb') self.write(fp.read()) fp.close()
[ "def", "send_file", "(", "self", ",", "file", ")", ":", "if", "self", ".", "logger", ":", "self", ".", "logger", ".", "debug", "(", "\"[ioc.extra.tornado.RouterHandler] send file %s\"", "%", "file", ")", "self", ".", "send_file_header", "(", "file", ")", "fp...
Send a file to the client, it is a convenient method to avoid duplicated code
[ "Send", "a", "file", "to", "the", "client", "it", "is", "a", "convenient", "method", "to", "avoid", "duplicated", "code" ]
train
https://github.com/rande/python-simple-ioc/blob/36ddf667c1213a07a53cd4cdd708d02494e5190b/ioc/extra/tornado/handler.py#L186-L198
pudo/jsongraph
jsongraph/triplify.py
triplify_object
def triplify_object(binding): """ Create bi-directional bindings for object relationships. """ triples = [] if binding.uri: triples.append((binding.subject, RDF.type, binding.uri)) if binding.parent is not None: parent = binding.parent.subject if binding.parent.is_array: parent = binding.parent.parent.subject triples.append((parent, binding.predicate, binding.subject)) if binding.reverse is not None: triples.append((binding.subject, binding.reverse, parent)) for prop in binding.properties: _, prop_triples = triplify(prop) triples.extend(prop_triples) return binding.subject, triples
python
def triplify_object(binding): """ Create bi-directional bindings for object relationships. """ triples = [] if binding.uri: triples.append((binding.subject, RDF.type, binding.uri)) if binding.parent is not None: parent = binding.parent.subject if binding.parent.is_array: parent = binding.parent.parent.subject triples.append((parent, binding.predicate, binding.subject)) if binding.reverse is not None: triples.append((binding.subject, binding.reverse, parent)) for prop in binding.properties: _, prop_triples = triplify(prop) triples.extend(prop_triples) return binding.subject, triples
[ "def", "triplify_object", "(", "binding", ")", ":", "triples", "=", "[", "]", "if", "binding", ".", "uri", ":", "triples", ".", "append", "(", "(", "binding", ".", "subject", ",", "RDF", ".", "type", ",", "binding", ".", "uri", ")", ")", "if", "bin...
Create bi-directional bindings for object relationships.
[ "Create", "bi", "-", "directional", "bindings", "for", "object", "relationships", "." ]
train
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/triplify.py#L4-L22
pudo/jsongraph
jsongraph/triplify.py
triplify
def triplify(binding): """ Recursively generate RDF statement triples from the data and schema supplied to the application. """ triples = [] if binding.data is None: return None, triples if binding.is_object: return triplify_object(binding) elif binding.is_array: for item in binding.items: _, item_triples = triplify(item) triples.extend(item_triples) return None, triples else: subject = binding.parent.subject triples.append((subject, binding.predicate, binding.object)) if binding.reverse is not None: triples.append((binding.object, binding.reverse, subject)) return subject, triples
python
def triplify(binding): """ Recursively generate RDF statement triples from the data and schema supplied to the application. """ triples = [] if binding.data is None: return None, triples if binding.is_object: return triplify_object(binding) elif binding.is_array: for item in binding.items: _, item_triples = triplify(item) triples.extend(item_triples) return None, triples else: subject = binding.parent.subject triples.append((subject, binding.predicate, binding.object)) if binding.reverse is not None: triples.append((binding.object, binding.reverse, subject)) return subject, triples
[ "def", "triplify", "(", "binding", ")", ":", "triples", "=", "[", "]", "if", "binding", ".", "data", "is", "None", ":", "return", "None", ",", "triples", "if", "binding", ".", "is_object", ":", "return", "triplify_object", "(", "binding", ")", "elif", ...
Recursively generate RDF statement triples from the data and schema supplied to the application.
[ "Recursively", "generate", "RDF", "statement", "triples", "from", "the", "data", "and", "schema", "supplied", "to", "the", "application", "." ]
train
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/triplify.py#L25-L44
openvax/datacache
datacache/database_types.py
_candidate_type_names
def _candidate_type_names(python_type_representation): """Generator which yields possible type names to look up in the conversion dictionary. Parameters ---------- python_type_representation : object Any Python object which represents a type, such as `int`, `dtype('int8')`, `np.int8`, or `"int8"`. """ # if we get a single character code we should normalize to a NumPy type # using np.typeDict, which maps string representations of types to NumPy # type objects if python_type_representation in np.typeDict: python_type_representation = np.typeDict[python_type_representation] yield python_type_representation.__name__ # if we get a dtype object i.e. dtype('int16'), then pull out its name if hasattr(python_type_representation, 'name'): yield python_type_representation.name # convert Python types by adding their type's name if hasattr(python_type_representation, '__name__'): yield python_type_representation.__name__ # for a dtype like dtype('S3') need to access dtype.type.__name__ # to get 'string_' if hasattr(python_type_representation, 'type'): if hasattr(python_type_representation.type, '__name__'): yield python_type_representation.type.__name__ yield str(python_type_representation)
python
def _candidate_type_names(python_type_representation): """Generator which yields possible type names to look up in the conversion dictionary. Parameters ---------- python_type_representation : object Any Python object which represents a type, such as `int`, `dtype('int8')`, `np.int8`, or `"int8"`. """ # if we get a single character code we should normalize to a NumPy type # using np.typeDict, which maps string representations of types to NumPy # type objects if python_type_representation in np.typeDict: python_type_representation = np.typeDict[python_type_representation] yield python_type_representation.__name__ # if we get a dtype object i.e. dtype('int16'), then pull out its name if hasattr(python_type_representation, 'name'): yield python_type_representation.name # convert Python types by adding their type's name if hasattr(python_type_representation, '__name__'): yield python_type_representation.__name__ # for a dtype like dtype('S3') need to access dtype.type.__name__ # to get 'string_' if hasattr(python_type_representation, 'type'): if hasattr(python_type_representation.type, '__name__'): yield python_type_representation.type.__name__ yield str(python_type_representation)
[ "def", "_candidate_type_names", "(", "python_type_representation", ")", ":", "# if we get a single character code we should normalize to a NumPy type", "# using np.typeDict, which maps string representations of types to NumPy", "# type objects", "if", "python_type_representation", "in", "np"...
Generator which yields possible type names to look up in the conversion dictionary. Parameters ---------- python_type_representation : object Any Python object which represents a type, such as `int`, `dtype('int8')`, `np.int8`, or `"int8"`.
[ "Generator", "which", "yields", "possible", "type", "names", "to", "look", "up", "in", "the", "conversion", "dictionary", "." ]
train
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/database_types.py#L51-L82
openvax/datacache
datacache/database_types.py
db_type
def db_type(python_type_representation): """ Converts from any of: (1) Python type (2) NumPy/Pandas dtypes (3) string names of types ...to a sqlite3 type name """ for type_name in _candidate_type_names(python_type_representation): db_type_name = _lookup_type_name(type_name) if db_type_name: return db_type_name raise ValueError("Failed to find sqlite3 column type for %s" % ( python_type_representation))
python
def db_type(python_type_representation): """ Converts from any of: (1) Python type (2) NumPy/Pandas dtypes (3) string names of types ...to a sqlite3 type name """ for type_name in _candidate_type_names(python_type_representation): db_type_name = _lookup_type_name(type_name) if db_type_name: return db_type_name raise ValueError("Failed to find sqlite3 column type for %s" % ( python_type_representation))
[ "def", "db_type", "(", "python_type_representation", ")", ":", "for", "type_name", "in", "_candidate_type_names", "(", "python_type_representation", ")", ":", "db_type_name", "=", "_lookup_type_name", "(", "type_name", ")", "if", "db_type_name", ":", "return", "db_typ...
Converts from any of: (1) Python type (2) NumPy/Pandas dtypes (3) string names of types ...to a sqlite3 type name
[ "Converts", "from", "any", "of", ":", "(", "1", ")", "Python", "type", "(", "2", ")", "NumPy", "/", "Pandas", "dtypes", "(", "3", ")", "string", "names", "of", "types", "...", "to", "a", "sqlite3", "type", "name" ]
train
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/database_types.py#L84-L97
jwodder/doapi
doapi/droplet.py
Droplet.ip_address
def ip_address(self): """ The IP address of the first interface listed in the droplet's ``networks`` field (ordering IPv4 before IPv6), or `None` if there are no interfaces """ networks = self.get("networks", {}) v4nets = networks.get("v4", []) v6nets = networks.get("v6", []) try: return (v4nets + v6nets)[0].ip_address except IndexError: return None
python
def ip_address(self): """ The IP address of the first interface listed in the droplet's ``networks`` field (ordering IPv4 before IPv6), or `None` if there are no interfaces """ networks = self.get("networks", {}) v4nets = networks.get("v4", []) v6nets = networks.get("v6", []) try: return (v4nets + v6nets)[0].ip_address except IndexError: return None
[ "def", "ip_address", "(", "self", ")", ":", "networks", "=", "self", ".", "get", "(", "\"networks\"", ",", "{", "}", ")", "v4nets", "=", "networks", ".", "get", "(", "\"v4\"", ",", "[", "]", ")", "v6nets", "=", "networks", ".", "get", "(", "\"v6\""...
The IP address of the first interface listed in the droplet's ``networks`` field (ordering IPv4 before IPv6), or `None` if there are no interfaces
[ "The", "IP", "address", "of", "the", "first", "interface", "listed", "in", "the", "droplet", "s", "networks", "field", "(", "ordering", "IPv4", "before", "IPv6", ")", "or", "None", "if", "there", "are", "no", "interfaces" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L141-L153
jwodder/doapi
doapi/droplet.py
Droplet.fetch
def fetch(self): """ Fetch & return a new `Droplet` object representing the droplet's current state :rtype: Droplet :raises DOAPIError: if the API endpoint replies with an error (e.g., if the droplet no longer exists) """ api = self.doapi_manager return api._droplet(api.request(self.url)["droplet"])
python
def fetch(self): """ Fetch & return a new `Droplet` object representing the droplet's current state :rtype: Droplet :raises DOAPIError: if the API endpoint replies with an error (e.g., if the droplet no longer exists) """ api = self.doapi_manager return api._droplet(api.request(self.url)["droplet"])
[ "def", "fetch", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "return", "api", ".", "_droplet", "(", "api", ".", "request", "(", "self", ".", "url", ")", "[", "\"droplet\"", "]", ")" ]
Fetch & return a new `Droplet` object representing the droplet's current state :rtype: Droplet :raises DOAPIError: if the API endpoint replies with an error (e.g., if the droplet no longer exists)
[ "Fetch", "&", "return", "a", "new", "Droplet", "object", "representing", "the", "droplet", "s", "current", "state" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L186-L196
jwodder/doapi
doapi/droplet.py
Droplet.fetch_all_neighbors
def fetch_all_neighbors(self): r""" Returns a generator that yields all of the droplets running on the same physical server as the droplet :rtype: generator of `Droplet`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager return map(api._droplet, api.paginate(self.url + '/neighbors', 'droplets'))
python
def fetch_all_neighbors(self): r""" Returns a generator that yields all of the droplets running on the same physical server as the droplet :rtype: generator of `Droplet`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager return map(api._droplet, api.paginate(self.url + '/neighbors', 'droplets'))
[ "def", "fetch_all_neighbors", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "return", "map", "(", "api", ".", "_droplet", ",", "api", ".", "paginate", "(", "self", ".", "url", "+", "'/neighbors'", ",", "'droplets'", ")", ")" ]
r""" Returns a generator that yields all of the droplets running on the same physical server as the droplet :rtype: generator of `Droplet`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "that", "yields", "all", "of", "the", "droplets", "running", "on", "the", "same", "physical", "server", "as", "the", "droplet" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L198-L208
jwodder/doapi
doapi/droplet.py
Droplet.fetch_all_snapshots
def fetch_all_snapshots(self): r""" Returns a generator that yields all of the snapshot images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for obj in api.paginate(self.url + '/snapshots', 'snapshots'): yield Image(obj, doapi_manager=api)
python
def fetch_all_snapshots(self): r""" Returns a generator that yields all of the snapshot images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for obj in api.paginate(self.url + '/snapshots', 'snapshots'): yield Image(obj, doapi_manager=api)
[ "def", "fetch_all_snapshots", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "for", "obj", "in", "api", ".", "paginate", "(", "self", ".", "url", "+", "'/snapshots'", ",", "'snapshots'", ")", ":", "yield", "Image", "(", "obj", ",", "d...
r""" Returns a generator that yields all of the snapshot images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "that", "yields", "all", "of", "the", "snapshot", "images", "created", "from", "the", "droplet" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L210-L220
jwodder/doapi
doapi/droplet.py
Droplet.fetch_all_backups
def fetch_all_backups(self): r""" Returns a generator that yields all of the backup images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for obj in api.paginate(self.url + '/backups', 'backups'): yield Image(obj, doapi_manager=api)
python
def fetch_all_backups(self): r""" Returns a generator that yields all of the backup images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for obj in api.paginate(self.url + '/backups', 'backups'): yield Image(obj, doapi_manager=api)
[ "def", "fetch_all_backups", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "for", "obj", "in", "api", ".", "paginate", "(", "self", ".", "url", "+", "'/backups'", ",", "'backups'", ")", ":", "yield", "Image", "(", "obj", ",", "doapi_m...
r""" Returns a generator that yields all of the backup images created from the droplet :rtype: generator of `Image`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "that", "yields", "all", "of", "the", "backup", "images", "created", "from", "the", "droplet" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L222-L232
jwodder/doapi
doapi/droplet.py
Droplet.fetch_all_kernels
def fetch_all_kernels(self): r""" Returns a generator that yields all of the kernels available to the droplet :rtype: generator of `Kernel`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for kern in api.paginate(self.url + '/kernels', 'kernels'): yield Kernel(kern, doapi_manager=api)
python
def fetch_all_kernels(self): r""" Returns a generator that yields all of the kernels available to the droplet :rtype: generator of `Kernel`\ s :raises DOAPIError: if the API endpoint replies with an error """ api = self.doapi_manager for kern in api.paginate(self.url + '/kernels', 'kernels'): yield Kernel(kern, doapi_manager=api)
[ "def", "fetch_all_kernels", "(", "self", ")", ":", "api", "=", "self", ".", "doapi_manager", "for", "kern", "in", "api", ".", "paginate", "(", "self", ".", "url", "+", "'/kernels'", ",", "'kernels'", ")", ":", "yield", "Kernel", "(", "kern", ",", "doap...
r""" Returns a generator that yields all of the kernels available to the droplet :rtype: generator of `Kernel`\ s :raises DOAPIError: if the API endpoint replies with an error
[ "r", "Returns", "a", "generator", "that", "yields", "all", "of", "the", "kernels", "available", "to", "the", "droplet" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L234-L244
jwodder/doapi
doapi/droplet.py
Droplet.restore
def restore(self, image): """ Restore the droplet to the specified backup image A Droplet restoration will rebuild an image using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing a backup image of the droplet :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(image, Image): image = image.id return self.act(type='restore', image=image)
python
def restore(self, image): """ Restore the droplet to the specified backup image A Droplet restoration will rebuild an image using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing a backup image of the droplet :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(image, Image): image = image.id return self.act(type='restore', image=image)
[ "def", "restore", "(", "self", ",", "image", ")", ":", "if", "isinstance", "(", "image", ",", "Image", ")", ":", "image", "=", "image", ".", "id", "return", "self", ".", "act", "(", "type", "=", "'restore'", ",", "image", "=", "image", ")" ]
Restore the droplet to the specified backup image A Droplet restoration will rebuild an image using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing a backup image of the droplet :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error
[ "Restore", "the", "droplet", "to", "the", "specified", "backup", "image" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L343-L362
jwodder/doapi
doapi/droplet.py
Droplet.resize
def resize(self, size, disk=None): """ Resize the droplet :param size: a size slug or a `Size` object representing the size to resize to :type size: string or `Size` :param bool disk: Set to `True` for a permanent resize, including disk changes :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(size, Size): size = size.slug opts = {"disk": disk} if disk is not None else {} return self.act(type='resize', size=size, **opts)
python
def resize(self, size, disk=None): """ Resize the droplet :param size: a size slug or a `Size` object representing the size to resize to :type size: string or `Size` :param bool disk: Set to `True` for a permanent resize, including disk changes :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(size, Size): size = size.slug opts = {"disk": disk} if disk is not None else {} return self.act(type='resize', size=size, **opts)
[ "def", "resize", "(", "self", ",", "size", ",", "disk", "=", "None", ")", ":", "if", "isinstance", "(", "size", ",", "Size", ")", ":", "size", "=", "size", ".", "slug", "opts", "=", "{", "\"disk\"", ":", "disk", "}", "if", "disk", "is", "not", ...
Resize the droplet :param size: a size slug or a `Size` object representing the size to resize to :type size: string or `Size` :param bool disk: Set to `True` for a permanent resize, including disk changes :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error
[ "Resize", "the", "droplet" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L375-L392
jwodder/doapi
doapi/droplet.py
Droplet.rebuild
def rebuild(self, image): """ Rebuild the droplet with the specified image A rebuild action functions just like a new create. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing the image the droplet should use as a base :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(image, Image): image = image.id return self.act(type='rebuild', image=image)
python
def rebuild(self, image): """ Rebuild the droplet with the specified image A rebuild action functions just like a new create. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing the image the droplet should use as a base :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(image, Image): image = image.id return self.act(type='rebuild', image=image)
[ "def", "rebuild", "(", "self", ",", "image", ")", ":", "if", "isinstance", "(", "image", ",", "Image", ")", ":", "image", "=", "image", ".", "id", "return", "self", ".", "act", "(", "type", "=", "'rebuild'", ",", "image", "=", "image", ")" ]
Rebuild the droplet with the specified image A rebuild action functions just like a new create. [APIDocs]_ :param image: an image ID, an image slug, or an `Image` object representing the image the droplet should use as a base :type image: integer, string, or `Image` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error
[ "Rebuild", "the", "droplet", "with", "the", "specified", "image" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L394-L410
jwodder/doapi
doapi/droplet.py
Droplet.change_kernel
def change_kernel(self, kernel): """ Change the droplet's kernel :param kernel: a kernel ID or `Kernel` object representing the new kernel :type kernel: integer or `Kernel` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(kernel, Kernel): kernel = kernel.id return self.act(type='change_kernel', kernel=kernel)
python
def change_kernel(self, kernel): """ Change the droplet's kernel :param kernel: a kernel ID or `Kernel` object representing the new kernel :type kernel: integer or `Kernel` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(kernel, Kernel): kernel = kernel.id return self.act(type='change_kernel', kernel=kernel)
[ "def", "change_kernel", "(", "self", ",", "kernel", ")", ":", "if", "isinstance", "(", "kernel", ",", "Kernel", ")", ":", "kernel", "=", "kernel", ".", "id", "return", "self", ".", "act", "(", "type", "=", "'change_kernel'", ",", "kernel", "=", "kernel...
Change the droplet's kernel :param kernel: a kernel ID or `Kernel` object representing the new kernel :type kernel: integer or `Kernel` :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error
[ "Change", "the", "droplet", "s", "kernel" ]
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L424-L438
jwodder/doapi
doapi/droplet.py
Droplet.wait
def wait(self, status=None, locked=None, wait_interval=None, wait_time=None): """ Poll the server periodically until the droplet has reached some final state. If ``status`` is non-`None`, ``wait`` will wait for the droplet's ``status`` field to equal the given value. If ``locked`` is non-`None`, `wait` will wait for the droplet's ``locked`` field to equal (the truth value of) the given value. Exactly one of ``status`` and ``locked`` must be non-`None`. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing the droplet's most recently fetched state) is raised. If a `KeyboardInterrupt` is caught, the droplet's most recently fetched state is returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout .. versionchanged:: 0.2.0 ``locked`` parameter added .. versionchanged:: 0.2.0 No longer waits for latest action to complete :param status: When non-`None`, the desired value for the ``status`` field of the droplet, which should be one of `Droplet.STATUS_ACTIVE`, `Droplet.STATUS_ARCHIVE`, `Droplet.STATUS_NEW`, and `Droplet.STATUS_OFF`. (For the sake of forwards-compatibility, any other value is accepted as well.) 
:type status: string or `None` :param locked: When non-`None`, the desired value for the ``locked`` field of the droplet :type locked: `bool` or `None` :param number wait_interval: how many seconds to sleep between requests; defaults to the `doapi` object's :attr:`~doapi.wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if the droplet has not yet completed, or a negative number to wait indefinitely; defaults to the `doapi` object's :attr:`~doapi.wait_time` if not specified or `None` :return: the droplet's final state :rtype: Droplet :raises TypeError: if both or neither of ``status`` & ``locked`` are defined :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded """ return next(self.doapi_manager.wait_droplets([self], status, locked, wait_interval, wait_time))
python
def wait(self, status=None, locked=None, wait_interval=None, wait_time=None): """ Poll the server periodically until the droplet has reached some final state. If ``status`` is non-`None`, ``wait`` will wait for the droplet's ``status`` field to equal the given value. If ``locked`` is non-`None`, `wait` will wait for the droplet's ``locked`` field to equal (the truth value of) the given value. Exactly one of ``status`` and ``locked`` must be non-`None`. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing the droplet's most recently fetched state) is raised. If a `KeyboardInterrupt` is caught, the droplet's most recently fetched state is returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout .. versionchanged:: 0.2.0 ``locked`` parameter added .. versionchanged:: 0.2.0 No longer waits for latest action to complete :param status: When non-`None`, the desired value for the ``status`` field of the droplet, which should be one of `Droplet.STATUS_ACTIVE`, `Droplet.STATUS_ARCHIVE`, `Droplet.STATUS_NEW`, and `Droplet.STATUS_OFF`. (For the sake of forwards-compatibility, any other value is accepted as well.) 
:type status: string or `None` :param locked: When non-`None`, the desired value for the ``locked`` field of the droplet :type locked: `bool` or `None` :param number wait_interval: how many seconds to sleep between requests; defaults to the `doapi` object's :attr:`~doapi.wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if the droplet has not yet completed, or a negative number to wait indefinitely; defaults to the `doapi` object's :attr:`~doapi.wait_time` if not specified or `None` :return: the droplet's final state :rtype: Droplet :raises TypeError: if both or neither of ``status`` & ``locked`` are defined :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded """ return next(self.doapi_manager.wait_droplets([self], status, locked, wait_interval, wait_time))
[ "def", "wait", "(", "self", ",", "status", "=", "None", ",", "locked", "=", "None", ",", "wait_interval", "=", "None", ",", "wait_time", "=", "None", ")", ":", "return", "next", "(", "self", ".", "doapi_manager", ".", "wait_droplets", "(", "[", "self",...
Poll the server periodically until the droplet has reached some final state. If ``status`` is non-`None`, ``wait`` will wait for the droplet's ``status`` field to equal the given value. If ``locked`` is non-`None`, `wait` will wait for the droplet's ``locked`` field to equal (the truth value of) the given value. Exactly one of ``status`` and ``locked`` must be non-`None`. If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing the droplet's most recently fetched state) is raised. If a `KeyboardInterrupt` is caught, the droplet's most recently fetched state is returned immediately without waiting for completion. .. versionchanged:: 0.2.0 Raises `WaitTimeoutError` on timeout .. versionchanged:: 0.2.0 ``locked`` parameter added .. versionchanged:: 0.2.0 No longer waits for latest action to complete :param status: When non-`None`, the desired value for the ``status`` field of the droplet, which should be one of `Droplet.STATUS_ACTIVE`, `Droplet.STATUS_ARCHIVE`, `Droplet.STATUS_NEW`, and `Droplet.STATUS_OFF`. (For the sake of forwards-compatibility, any other value is accepted as well.) :type status: string or `None` :param locked: When non-`None`, the desired value for the ``locked`` field of the droplet :type locked: `bool` or `None` :param number wait_interval: how many seconds to sleep between requests; defaults to the `doapi` object's :attr:`~doapi.wait_interval` if not specified or `None` :param number wait_time: the total number of seconds after which the method will raise an error if the droplet has not yet completed, or a negative number to wait indefinitely; defaults to the `doapi` object's :attr:`~doapi.wait_time` if not specified or `None` :return: the droplet's final state :rtype: Droplet :raises TypeError: if both or neither of ``status`` & ``locked`` are defined :raises DOAPIError: if the API endpoint replies with an error :raises WaitTimeoutError: if ``wait_time`` is exceeded
[ "Poll", "the", "server", "periodically", "until", "the", "droplet", "has", "reached", "some", "final", "state", ".", "If", "status", "is", "non", "-", "None", "wait", "will", "wait", "for", "the", "droplet", "s", "status", "field", "to", "equal", "the", ...
train
https://github.com/jwodder/doapi/blob/b1306de86a01d8ae7b9c1fe2699765bb82e4f310/doapi/droplet.py#L483-L532
inveniosoftware/invenio-openaire
invenio_openaire/minters.py
minter
def minter(record_uuid, data, pid_type, key): """Mint PIDs for a record.""" pid = PersistentIdentifier.create( pid_type, data[key], object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) for scheme, identifier in data['identifiers'].items(): if identifier: PersistentIdentifier.create( scheme, identifier, object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) return pid
python
def minter(record_uuid, data, pid_type, key): """Mint PIDs for a record.""" pid = PersistentIdentifier.create( pid_type, data[key], object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) for scheme, identifier in data['identifiers'].items(): if identifier: PersistentIdentifier.create( scheme, identifier, object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED ) return pid
[ "def", "minter", "(", "record_uuid", ",", "data", ",", "pid_type", ",", "key", ")", ":", "pid", "=", "PersistentIdentifier", ".", "create", "(", "pid_type", ",", "data", "[", "key", "]", ",", "object_type", "=", "'rec'", ",", "object_uuid", "=", "record_...
Mint PIDs for a record.
[ "Mint", "PIDs", "for", "a", "record", "." ]
train
https://github.com/inveniosoftware/invenio-openaire/blob/71860effff6abe7f658d3a11894e064202ef1c36/invenio_openaire/minters.py#L42-L60
miku/gluish
gluish/utils.py
date_range
def date_range(start_date, end_date, increment, period): """ Generate `date` objects between `start_date` and `end_date` in `increment` `period` intervals. """ next = start_date delta = relativedelta.relativedelta(**{period:increment}) while next <= end_date: yield next next += delta
python
def date_range(start_date, end_date, increment, period): """ Generate `date` objects between `start_date` and `end_date` in `increment` `period` intervals. """ next = start_date delta = relativedelta.relativedelta(**{period:increment}) while next <= end_date: yield next next += delta
[ "def", "date_range", "(", "start_date", ",", "end_date", ",", "increment", ",", "period", ")", ":", "next", "=", "start_date", "delta", "=", "relativedelta", ".", "relativedelta", "(", "*", "*", "{", "period", ":", "increment", "}", ")", "while", "next", ...
Generate `date` objects between `start_date` and `end_date` in `increment` `period` intervals.
[ "Generate", "date", "objects", "between", "start_date", "and", "end_date", "in", "increment", "period", "intervals", "." ]
train
https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/utils.py#L55-L64
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_binaries.py
GaP_Eg_Gamma
def GaP_Eg_Gamma(self, **kwargs): ''' Returns the Gamma-valley bandgap, Eg_Gamma, in electron Volts at a given temperature, T, in Kelvin (default: 300 K). GaP has a unique Gamma-gap temperature dependence. ''' T = kwargs.get('T', 300.) if T < 1e-4: return self.Eg_Gamma_0() return self.Eg_Gamma_0() + 0.1081 * (1 - 1. / tanh(164. / T)) # eV
python
def GaP_Eg_Gamma(self, **kwargs): ''' Returns the Gamma-valley bandgap, Eg_Gamma, in electron Volts at a given temperature, T, in Kelvin (default: 300 K). GaP has a unique Gamma-gap temperature dependence. ''' T = kwargs.get('T', 300.) if T < 1e-4: return self.Eg_Gamma_0() return self.Eg_Gamma_0() + 0.1081 * (1 - 1. / tanh(164. / T)) # eV
[ "def", "GaP_Eg_Gamma", "(", "self", ",", "*", "*", "kwargs", ")", ":", "T", "=", "kwargs", ".", "get", "(", "'T'", ",", "300.", ")", "if", "T", "<", "1e-4", ":", "return", "self", ".", "Eg_Gamma_0", "(", ")", "return", "self", ".", "Eg_Gamma_0", ...
Returns the Gamma-valley bandgap, Eg_Gamma, in electron Volts at a given temperature, T, in Kelvin (default: 300 K). GaP has a unique Gamma-gap temperature dependence.
[ "Returns", "the", "Gamma", "-", "valley", "bandgap", "Eg_Gamma", "in", "electron", "Volts", "at", "a", "given", "temperature", "T", "in", "Kelvin", "(", "default", ":", "300", "K", ")", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_binaries.py#L190-L200
thibault/django-nexmo
djexmo/utils.py
send_message
def send_message(frm=None, to=None, text=None): """Shortcut to send a sms using libnexmo api. :param frm: The originator of the message :param to: The message's recipient :param text: The text message body Example usage: >>> send_message(to='+33123456789', text='My sms message body') """ assert to is not None assert text is not None if frm is None: frm = settings.NEXMO_DEFAULT_FROM client = nexmo.Client(key=settings.NEXMO_API_KEY, secret=settings.NEXMO_API_SECRET) response = client.send_message({ 'from': frm, 'to': to, 'text': text }) return response
python
def send_message(frm=None, to=None, text=None): """Shortcut to send a sms using libnexmo api. :param frm: The originator of the message :param to: The message's recipient :param text: The text message body Example usage: >>> send_message(to='+33123456789', text='My sms message body') """ assert to is not None assert text is not None if frm is None: frm = settings.NEXMO_DEFAULT_FROM client = nexmo.Client(key=settings.NEXMO_API_KEY, secret=settings.NEXMO_API_SECRET) response = client.send_message({ 'from': frm, 'to': to, 'text': text }) return response
[ "def", "send_message", "(", "frm", "=", "None", ",", "to", "=", "None", ",", "text", "=", "None", ")", ":", "assert", "to", "is", "not", "None", "assert", "text", "is", "not", "None", "if", "frm", "is", "None", ":", "frm", "=", "settings", ".", "...
Shortcut to send a sms using libnexmo api. :param frm: The originator of the message :param to: The message's recipient :param text: The text message body Example usage: >>> send_message(to='+33123456789', text='My sms message body')
[ "Shortcut", "to", "send", "a", "sms", "using", "libnexmo", "api", "." ]
train
https://github.com/thibault/django-nexmo/blob/6cab80c96b85fdcbb03ddab5ad1a01440be4992d/djexmo/utils.py#L10-L34
kejbaly2/metrique
metrique/result.py
filtered
def filtered(f): ''' Decorator function that wraps functions returning pandas dataframes, such that the dataframe is filtered according to left and right bounds set. ''' def _filter(f, self, *args, **kwargs): frame = f(self, *args, **kwargs) ret = type(self)(frame) ret._lbound = self._lbound ret._rbound = self._rbound return ret if HAS_DECORATOR: return decorator(_filter, f) else: def err_func(*args, **kwargs): raise RuntimeError("`pip install decorator` required") return err_func
python
def filtered(f): ''' Decorator function that wraps functions returning pandas dataframes, such that the dataframe is filtered according to left and right bounds set. ''' def _filter(f, self, *args, **kwargs): frame = f(self, *args, **kwargs) ret = type(self)(frame) ret._lbound = self._lbound ret._rbound = self._rbound return ret if HAS_DECORATOR: return decorator(_filter, f) else: def err_func(*args, **kwargs): raise RuntimeError("`pip install decorator` required") return err_func
[ "def", "filtered", "(", "f", ")", ":", "def", "_filter", "(", "f", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "frame", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "ret", "=", "type", "(", "s...
Decorator function that wraps functions returning pandas dataframes, such that the dataframe is filtered according to left and right bounds set.
[ "Decorator", "function", "that", "wraps", "functions", "returning", "pandas", "dataframes", "such", "that", "the", "dataframe", "is", "filtered", "according", "to", "left", "and", "right", "bounds", "set", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L54-L73
kejbaly2/metrique
metrique/result.py
Result.to_datetime
def to_datetime(self, column): ''' This function converts epoch timestamps to datetimes. :param column: column to convert from current state -> datetime ''' if column in self: if self[column].dtype in NUMPY_NUMERICAL: self[column] = pd.to_datetime(self[column], unit='s') else: self[column] = pd.to_datetime(self[column], utc=True)
python
def to_datetime(self, column): ''' This function converts epoch timestamps to datetimes. :param column: column to convert from current state -> datetime ''' if column in self: if self[column].dtype in NUMPY_NUMERICAL: self[column] = pd.to_datetime(self[column], unit='s') else: self[column] = pd.to_datetime(self[column], utc=True)
[ "def", "to_datetime", "(", "self", ",", "column", ")", ":", "if", "column", "in", "self", ":", "if", "self", "[", "column", "]", ".", "dtype", "in", "NUMPY_NUMERICAL", ":", "self", "[", "column", "]", "=", "pd", ".", "to_datetime", "(", "self", "[", ...
This function converts epoch timestamps to datetimes. :param column: column to convert from current state -> datetime
[ "This", "function", "converts", "epoch", "timestamps", "to", "datetimes", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L122-L132
kejbaly2/metrique
metrique/result.py
Result.set_date_bounds
def set_date_bounds(self, date): ''' Pass in the date used in the original query. :param date: Date (date range) that was queried: date -> 'd', '~d', 'd~', 'd~d' d -> '%Y-%m-%d %H:%M:%S,%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d' ''' if date is not None: split = date.split('~') if len(split) == 1: self._lbound = ts2dt(date) self._rbound = ts2dt(date) elif len(split) == 2: if split[0] != '': self._lbound = ts2dt(split[0]) if split[1] != '': self._rbound = ts2dt(split[1]) else: raise Exception('Date %s is not in the correct format' % date)
python
def set_date_bounds(self, date): ''' Pass in the date used in the original query. :param date: Date (date range) that was queried: date -> 'd', '~d', 'd~', 'd~d' d -> '%Y-%m-%d %H:%M:%S,%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d' ''' if date is not None: split = date.split('~') if len(split) == 1: self._lbound = ts2dt(date) self._rbound = ts2dt(date) elif len(split) == 2: if split[0] != '': self._lbound = ts2dt(split[0]) if split[1] != '': self._rbound = ts2dt(split[1]) else: raise Exception('Date %s is not in the correct format' % date)
[ "def", "set_date_bounds", "(", "self", ",", "date", ")", ":", "if", "date", "is", "not", "None", ":", "split", "=", "date", ".", "split", "(", "'~'", ")", "if", "len", "(", "split", ")", "==", "1", ":", "self", ".", "_lbound", "=", "ts2dt", "(", ...
Pass in the date used in the original query. :param date: Date (date range) that was queried: date -> 'd', '~d', 'd~', 'd~d' d -> '%Y-%m-%d %H:%M:%S,%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d'
[ "Pass", "in", "the", "date", "used", "in", "the", "original", "query", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L134-L153
kejbaly2/metrique
metrique/result.py
Result.check_in_bounds
def check_in_bounds(self, date): '''Check that left and right bounds are sane :param date: date to validate left/right bounds for ''' dt = Timestamp(date) return ((self._lbound is None or dt >= self._lbound) and (self._rbound is None or dt <= self._rbound))
python
def check_in_bounds(self, date): '''Check that left and right bounds are sane :param date: date to validate left/right bounds for ''' dt = Timestamp(date) return ((self._lbound is None or dt >= self._lbound) and (self._rbound is None or dt <= self._rbound))
[ "def", "check_in_bounds", "(", "self", ",", "date", ")", ":", "dt", "=", "Timestamp", "(", "date", ")", "return", "(", "(", "self", ".", "_lbound", "is", "None", "or", "dt", ">=", "self", ".", "_lbound", ")", "and", "(", "self", ".", "_rbound", "is...
Check that left and right bounds are sane :param date: date to validate left/right bounds for
[ "Check", "that", "left", "and", "right", "bounds", "are", "sane" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L155-L162
kejbaly2/metrique
metrique/result.py
Result.on_date
def on_date(self, date, only_count=False): ''' Filters out only the rows that match the spectified date. Works only on a Result that has _start and _end columns. :param date: date can be anything Pandas.Timestamp supports parsing :param only_count: return back only the match count ''' if not self.check_in_bounds(date): raise ValueError('Date %s is not in the queried range.' % date) date = Timestamp(date) after_start = self._start <= date before_end = (self._end > date) | self._end_isnull if only_count: return np.sum(before_end & after_start) else: return self.filter(before_end & after_start)
python
def on_date(self, date, only_count=False): ''' Filters out only the rows that match the spectified date. Works only on a Result that has _start and _end columns. :param date: date can be anything Pandas.Timestamp supports parsing :param only_count: return back only the match count ''' if not self.check_in_bounds(date): raise ValueError('Date %s is not in the queried range.' % date) date = Timestamp(date) after_start = self._start <= date before_end = (self._end > date) | self._end_isnull if only_count: return np.sum(before_end & after_start) else: return self.filter(before_end & after_start)
[ "def", "on_date", "(", "self", ",", "date", ",", "only_count", "=", "False", ")", ":", "if", "not", "self", ".", "check_in_bounds", "(", "date", ")", ":", "raise", "ValueError", "(", "'Date %s is not in the queried range.'", "%", "date", ")", "date", "=", ...
Filters out only the rows that match the spectified date. Works only on a Result that has _start and _end columns. :param date: date can be anything Pandas.Timestamp supports parsing :param only_count: return back only the match count
[ "Filters", "out", "only", "the", "rows", "that", "match", "the", "spectified", "date", ".", "Works", "only", "on", "a", "Result", "that", "has", "_start", "and", "_end", "columns", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L164-L180
kejbaly2/metrique
metrique/result.py
Result.history
def history(self, dates=None, linreg_since=None, lin_reg_days=20): ''' Works only on a Result that has _start and _end columns. :param dates: list of dates to query :param linreg_since: estimate future values using linear regression. :param lin_reg_days: number of past days to use as prediction basis ''' dates = dates or self.get_dates_range() vals = [self.on_date(dt, only_count=True) for dt in dates] ret = Series(vals, index=dates) if linreg_since is not None: ret = self._linreg_future(ret, linreg_since, lin_reg_days) return ret.sort_index()
python
def history(self, dates=None, linreg_since=None, lin_reg_days=20): ''' Works only on a Result that has _start and _end columns. :param dates: list of dates to query :param linreg_since: estimate future values using linear regression. :param lin_reg_days: number of past days to use as prediction basis ''' dates = dates or self.get_dates_range() vals = [self.on_date(dt, only_count=True) for dt in dates] ret = Series(vals, index=dates) if linreg_since is not None: ret = self._linreg_future(ret, linreg_since, lin_reg_days) return ret.sort_index()
[ "def", "history", "(", "self", ",", "dates", "=", "None", ",", "linreg_since", "=", "None", ",", "lin_reg_days", "=", "20", ")", ":", "dates", "=", "dates", "or", "self", ".", "get_dates_range", "(", ")", "vals", "=", "[", "self", ".", "on_date", "("...
Works only on a Result that has _start and _end columns. :param dates: list of dates to query :param linreg_since: estimate future values using linear regression. :param lin_reg_days: number of past days to use as prediction basis
[ "Works", "only", "on", "a", "Result", "that", "has", "_start", "and", "_end", "columns", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L182-L195
kejbaly2/metrique
metrique/result.py
Result._linreg_future
def _linreg_future(self, series, since, days=20): ''' Predicts future using linear regression. :param series: A series in which the values will be places. The index will not be touched. Only the values on dates > `since` will be predicted. :param since: The starting date from which the future will be predicted. :param days: Specifies how many past days should be used in the linear regression. ''' last_days = pd.date_range(end=since, periods=days) hist = self.history(last_days) xi = np.array(map(dt2ts, hist.index)) A = np.array([xi, np.ones(len(hist))]) y = hist.values w = np.linalg.lstsq(A.T, y)[0] for d in series.index[series.index > since]: series[d] = w[0] * dt2ts(d) + w[1] series[d] = 0 if series[d] < 0 else series[d] return series
python
def _linreg_future(self, series, since, days=20): ''' Predicts future using linear regression. :param series: A series in which the values will be places. The index will not be touched. Only the values on dates > `since` will be predicted. :param since: The starting date from which the future will be predicted. :param days: Specifies how many past days should be used in the linear regression. ''' last_days = pd.date_range(end=since, periods=days) hist = self.history(last_days) xi = np.array(map(dt2ts, hist.index)) A = np.array([xi, np.ones(len(hist))]) y = hist.values w = np.linalg.lstsq(A.T, y)[0] for d in series.index[series.index > since]: series[d] = w[0] * dt2ts(d) + w[1] series[d] = 0 if series[d] < 0 else series[d] return series
[ "def", "_linreg_future", "(", "self", ",", "series", ",", "since", ",", "days", "=", "20", ")", ":", "last_days", "=", "pd", ".", "date_range", "(", "end", "=", "since", ",", "periods", "=", "days", ")", "hist", "=", "self", ".", "history", "(", "l...
Predicts future using linear regression. :param series: A series in which the values will be places. The index will not be touched. Only the values on dates > `since` will be predicted. :param since: The starting date from which the future will be predicted. :param days: Specifies how many past days should be used in the linear regression.
[ "Predicts", "future", "using", "linear", "regression", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L197-L223
kejbaly2/metrique
metrique/result.py
Result.get_dates_range
def get_dates_range(self, scale='auto', start=None, end=None, date_max='2010-01-01'): ''' Returns a list of dates sampled according to the specified parameters. :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly'} Scale specifies the sampling intervals. 'auto' will heuristically choose a scale for quick processing :param start: First date that will be included. :param end: Last date that will be included ''' if scale not in ['auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly']: raise ValueError('Incorrect scale: %s' % scale) start = Timestamp(start or self._start.min() or date_max) # FIXME: start != start is true for NaN objects... is NaT the same? start = Timestamp(date_max) if repr(start) == 'NaT' else start end = Timestamp(end or max(Timestamp(self._end.max()), self._start.max())) # FIXME: end != end ? end = datetime.utcnow() if repr(end) == 'NaT' else end start = start if self.check_in_bounds(start) else self._lbound end = end if self.check_in_bounds(end) else self._rbound if scale == 'auto': scale = self._auto_select_scale(start, end) if scale == 'maximum': start_dts = list(self._start.dropna().values) end_dts = list(self._end.dropna().values) dts = map(Timestamp, set(start_dts + end_dts)) dts = filter(lambda ts: self.check_in_bounds(ts) and ts >= start and ts <= end, dts) return dts freq = dict(daily='D', weekly='W', monthly='M', quarterly='3M', yearly='12M') offset = dict(daily=off.Day(n=0), weekly=off.Week(), monthly=off.MonthEnd(), quarterly=off.QuarterEnd(), yearly=off.YearEnd()) # for some reason, weekly date range gives one week less: end_ = end + off.Week() if scale == 'weekly' else end ret = list(pd.date_range(start + offset[scale], end_, freq=freq[scale])) ret = [dt for dt in ret if dt <= end] ret = [start] + ret if ret and start < ret[0] else ret ret = ret + [end] if ret and end > ret[-1] else ret ret = filter(lambda ts: self.check_in_bounds(ts), ret) return ret
python
def get_dates_range(self, scale='auto', start=None, end=None, date_max='2010-01-01'): ''' Returns a list of dates sampled according to the specified parameters. :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly'} Scale specifies the sampling intervals. 'auto' will heuristically choose a scale for quick processing :param start: First date that will be included. :param end: Last date that will be included ''' if scale not in ['auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly']: raise ValueError('Incorrect scale: %s' % scale) start = Timestamp(start or self._start.min() or date_max) # FIXME: start != start is true for NaN objects... is NaT the same? start = Timestamp(date_max) if repr(start) == 'NaT' else start end = Timestamp(end or max(Timestamp(self._end.max()), self._start.max())) # FIXME: end != end ? end = datetime.utcnow() if repr(end) == 'NaT' else end start = start if self.check_in_bounds(start) else self._lbound end = end if self.check_in_bounds(end) else self._rbound if scale == 'auto': scale = self._auto_select_scale(start, end) if scale == 'maximum': start_dts = list(self._start.dropna().values) end_dts = list(self._end.dropna().values) dts = map(Timestamp, set(start_dts + end_dts)) dts = filter(lambda ts: self.check_in_bounds(ts) and ts >= start and ts <= end, dts) return dts freq = dict(daily='D', weekly='W', monthly='M', quarterly='3M', yearly='12M') offset = dict(daily=off.Day(n=0), weekly=off.Week(), monthly=off.MonthEnd(), quarterly=off.QuarterEnd(), yearly=off.YearEnd()) # for some reason, weekly date range gives one week less: end_ = end + off.Week() if scale == 'weekly' else end ret = list(pd.date_range(start + offset[scale], end_, freq=freq[scale])) ret = [dt for dt in ret if dt <= end] ret = [start] + ret if ret and start < ret[0] else ret ret = ret + [end] if ret and end > ret[-1] else ret ret = filter(lambda ts: self.check_in_bounds(ts), ret) return ret
[ "def", "get_dates_range", "(", "self", ",", "scale", "=", "'auto'", ",", "start", "=", "None", ",", "end", "=", "None", ",", "date_max", "=", "'2010-01-01'", ")", ":", "if", "scale", "not", "in", "[", "'auto'", ",", "'maximum'", ",", "'daily'", ",", ...
Returns a list of dates sampled according to the specified parameters. :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly', 'quarterly', 'yearly'} Scale specifies the sampling intervals. 'auto' will heuristically choose a scale for quick processing :param start: First date that will be included. :param end: Last date that will be included
[ "Returns", "a", "list", "of", "dates", "sampled", "according", "to", "the", "specified", "parameters", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L227-L275
kejbaly2/metrique
metrique/result.py
Result._auto_select_scale
def _auto_select_scale(self, start=None, end=None, ideal=300): ''' Guess what a good timeseries scale might be, given a particular data set, attempting to make the total number of x values as close to `ideal` as possible This is a helper for plotting ''' start = start or self._start.min() end = end or max(self._end.max(), self._start.max()) daily_count = (end - start).days if daily_count <= ideal: return 'daily' elif daily_count / 7 <= ideal: return 'weekly' elif daily_count / 30 <= ideal: return 'monthly' elif daily_count / 91 <= ideal: return 'quarterly' else: return 'yearly'
python
def _auto_select_scale(self, start=None, end=None, ideal=300): ''' Guess what a good timeseries scale might be, given a particular data set, attempting to make the total number of x values as close to `ideal` as possible This is a helper for plotting ''' start = start or self._start.min() end = end or max(self._end.max(), self._start.max()) daily_count = (end - start).days if daily_count <= ideal: return 'daily' elif daily_count / 7 <= ideal: return 'weekly' elif daily_count / 30 <= ideal: return 'monthly' elif daily_count / 91 <= ideal: return 'quarterly' else: return 'yearly'
[ "def", "_auto_select_scale", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "ideal", "=", "300", ")", ":", "start", "=", "start", "or", "self", ".", "_start", ".", "min", "(", ")", "end", "=", "end", "or", "max", "(", "self...
Guess what a good timeseries scale might be, given a particular data set, attempting to make the total number of x values as close to `ideal` as possible This is a helper for plotting
[ "Guess", "what", "a", "good", "timeseries", "scale", "might", "be", "given", "a", "particular", "data", "set", "attempting", "to", "make", "the", "total", "number", "of", "x", "values", "as", "close", "to", "ideal", "as", "possible" ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L277-L298
kejbaly2/metrique
metrique/result.py
Result.filter_oids
def filter_oids(self, oids): ''' Leaves only objects with specified oids. :param oids: list of oids to include ''' oids = set(oids) return self[self['_oid'].map(lambda x: x in oids)]
python
def filter_oids(self, oids): ''' Leaves only objects with specified oids. :param oids: list of oids to include ''' oids = set(oids) return self[self['_oid'].map(lambda x: x in oids)]
[ "def", "filter_oids", "(", "self", ",", "oids", ")", ":", "oids", "=", "set", "(", "oids", ")", "return", "self", "[", "self", "[", "'_oid'", "]", ".", "map", "(", "lambda", "x", ":", "x", "in", "oids", ")", "]" ]
Leaves only objects with specified oids. :param oids: list of oids to include
[ "Leaves", "only", "objects", "with", "specified", "oids", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L303-L310
kejbaly2/metrique
metrique/result.py
Result.unfinished_objects
def unfinished_objects(self): ''' Leaves only versions of those objects that has some version with `_end == None` or with `_end > right cutoff`. ''' mask = self._end_isnull if self._rbound is not None: mask = mask | (self._end > self._rbound) oids = set(self[mask]._oid.tolist()) return self[self._oid.apply(lambda oid: oid in oids)]
python
def unfinished_objects(self): ''' Leaves only versions of those objects that has some version with `_end == None` or with `_end > right cutoff`. ''' mask = self._end_isnull if self._rbound is not None: mask = mask | (self._end > self._rbound) oids = set(self[mask]._oid.tolist()) return self[self._oid.apply(lambda oid: oid in oids)]
[ "def", "unfinished_objects", "(", "self", ")", ":", "mask", "=", "self", ".", "_end_isnull", "if", "self", ".", "_rbound", "is", "not", "None", ":", "mask", "=", "mask", "|", "(", "self", ".", "_end", ">", "self", ".", "_rbound", ")", "oids", "=", ...
Leaves only versions of those objects that has some version with `_end == None` or with `_end > right cutoff`.
[ "Leaves", "only", "versions", "of", "those", "objects", "that", "has", "some", "version", "with", "_end", "==", "None", "or", "with", "_end", ">", "right", "cutoff", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L313-L322
kejbaly2/metrique
metrique/result.py
Result.persistent_oid_counts
def persistent_oid_counts(self, dates): ''' Counts how many objects (identified by their oids) existed before or on a given date. :param dates: list of the dates the count should be computed. ''' total = pd.Series([self.on_date(d)._oid for d in dates], index=dates) for i in range(1, total.size): a1 = total[total.index[i - 1]] a2 = total[total.index[i]] total[total.index[i]] = list(set(a1) | set(a2)) return total.apply(len)
python
def persistent_oid_counts(self, dates): ''' Counts how many objects (identified by their oids) existed before or on a given date. :param dates: list of the dates the count should be computed. ''' total = pd.Series([self.on_date(d)._oid for d in dates], index=dates) for i in range(1, total.size): a1 = total[total.index[i - 1]] a2 = total[total.index[i]] total[total.index[i]] = list(set(a1) | set(a2)) return total.apply(len)
[ "def", "persistent_oid_counts", "(", "self", ",", "dates", ")", ":", "total", "=", "pd", ".", "Series", "(", "[", "self", ".", "on_date", "(", "d", ")", ".", "_oid", "for", "d", "in", "dates", "]", ",", "index", "=", "dates", ")", "for", "i", "in...
Counts how many objects (identified by their oids) existed before or on a given date. :param dates: list of the dates the count should be computed.
[ "Counts", "how", "many", "objects", "(", "identified", "by", "their", "oids", ")", "existed", "before", "or", "on", "a", "given", "date", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L324-L337
kejbaly2/metrique
metrique/result.py
Result.last_versions_with_age
def last_versions_with_age(self, col_name='age'): ''' Leaves only the latest version for each object. Adds a new column which represents age. The age is computed by subtracting _start of the oldest version from one of these possibilities:: # psuedo-code if self._rbound is None: if latest_version._end is pd.NaT: current_time is used else: min(current_time, latest_version._end) is used else: if latest_version._end is pd.NaT: self._rbound is used else: min(self._rbound, latest_version._end) is used :param index: name of the new column. ''' min_start_map = {} max_start_map = {} max_start_ser_map = {} cols = self.columns.tolist() i_oid = cols.index('_oid') i_start = cols.index('_start') i_end = cols.index('_end') for row in self.values: s = row[i_start] oid = row[i_oid] mins = min_start_map.get(oid, s) if s <= mins: min_start_map[oid] = s maxs = max_start_map.get(oid, s) if s >= maxs: max_start_map[oid] = s max_start_ser_map[oid] = row vals = max_start_ser_map.values() cut_ts = datetime.utcnow() ages = [] for row in vals: end = row[i_end] end = cut_ts if end is pd.NaT else min(cut_ts, end) age = end - min_start_map[row[i_oid]] age = age - timedelta(microseconds=age.microseconds) ages.append(age) res = pd.DataFrame(max_start_ser_map.values(), columns=cols) res[col_name] = pd.Series(ages, index=res.index) return res
python
def last_versions_with_age(self, col_name='age'): ''' Leaves only the latest version for each object. Adds a new column which represents age. The age is computed by subtracting _start of the oldest version from one of these possibilities:: # psuedo-code if self._rbound is None: if latest_version._end is pd.NaT: current_time is used else: min(current_time, latest_version._end) is used else: if latest_version._end is pd.NaT: self._rbound is used else: min(self._rbound, latest_version._end) is used :param index: name of the new column. ''' min_start_map = {} max_start_map = {} max_start_ser_map = {} cols = self.columns.tolist() i_oid = cols.index('_oid') i_start = cols.index('_start') i_end = cols.index('_end') for row in self.values: s = row[i_start] oid = row[i_oid] mins = min_start_map.get(oid, s) if s <= mins: min_start_map[oid] = s maxs = max_start_map.get(oid, s) if s >= maxs: max_start_map[oid] = s max_start_ser_map[oid] = row vals = max_start_ser_map.values() cut_ts = datetime.utcnow() ages = [] for row in vals: end = row[i_end] end = cut_ts if end is pd.NaT else min(cut_ts, end) age = end - min_start_map[row[i_oid]] age = age - timedelta(microseconds=age.microseconds) ages.append(age) res = pd.DataFrame(max_start_ser_map.values(), columns=cols) res[col_name] = pd.Series(ages, index=res.index) return res
[ "def", "last_versions_with_age", "(", "self", ",", "col_name", "=", "'age'", ")", ":", "min_start_map", "=", "{", "}", "max_start_map", "=", "{", "}", "max_start_ser_map", "=", "{", "}", "cols", "=", "self", ".", "columns", ".", "tolist", "(", ")", "i_oi...
Leaves only the latest version for each object. Adds a new column which represents age. The age is computed by subtracting _start of the oldest version from one of these possibilities:: # psuedo-code if self._rbound is None: if latest_version._end is pd.NaT: current_time is used else: min(current_time, latest_version._end) is used else: if latest_version._end is pd.NaT: self._rbound is used else: min(self._rbound, latest_version._end) is used :param index: name of the new column.
[ "Leaves", "only", "the", "latest", "version", "for", "each", "object", ".", "Adds", "a", "new", "column", "which", "represents", "age", ".", "The", "age", "is", "computed", "by", "subtracting", "_start", "of", "the", "oldest", "version", "from", "one", "of...
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L340-L392
kejbaly2/metrique
metrique/result.py
Result.last_chain
def last_chain(self): ''' Leaves only the last chain for each object. Chain is a series of consecutive versions where `_end` of one is `_start` of another. ''' cols = self.columns.tolist() i_oid = cols.index('_oid') i_start = cols.index('_start') i_end = cols.index('_end') start_map = {} end_map = {} for row in self.values: oid = row[i_oid] if oid not in start_map: start_map[oid] = set() end_map[oid] = set() start_map[oid].add(row[i_start]) end_map[oid].add(row[i_end]) cutoffs = {} for oid in start_map: maxend = pd.NaT if pd.NaT in end_map[oid] else max(end_map[oid]) ends = end_map[oid] - start_map[oid] - set([maxend]) cutoffs[oid] = None if len(ends) == 0 else max(ends) vals = [row for row in self.values if cutoffs[row[i_oid]] is None or cutoffs[row[i_oid]] < row[i_start]] return pd.DataFrame(vals, columns=cols)
python
def last_chain(self): ''' Leaves only the last chain for each object. Chain is a series of consecutive versions where `_end` of one is `_start` of another. ''' cols = self.columns.tolist() i_oid = cols.index('_oid') i_start = cols.index('_start') i_end = cols.index('_end') start_map = {} end_map = {} for row in self.values: oid = row[i_oid] if oid not in start_map: start_map[oid] = set() end_map[oid] = set() start_map[oid].add(row[i_start]) end_map[oid].add(row[i_end]) cutoffs = {} for oid in start_map: maxend = pd.NaT if pd.NaT in end_map[oid] else max(end_map[oid]) ends = end_map[oid] - start_map[oid] - set([maxend]) cutoffs[oid] = None if len(ends) == 0 else max(ends) vals = [row for row in self.values if cutoffs[row[i_oid]] is None or cutoffs[row[i_oid]] < row[i_start]] return pd.DataFrame(vals, columns=cols)
[ "def", "last_chain", "(", "self", ")", ":", "cols", "=", "self", ".", "columns", ".", "tolist", "(", ")", "i_oid", "=", "cols", ".", "index", "(", "'_oid'", ")", "i_start", "=", "cols", ".", "index", "(", "'_start'", ")", "i_end", "=", "cols", ".",...
Leaves only the last chain for each object. Chain is a series of consecutive versions where `_end` of one is `_start` of another.
[ "Leaves", "only", "the", "last", "chain", "for", "each", "object", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L395-L427
kejbaly2/metrique
metrique/result.py
Result.one_version
def one_version(self, index=0): ''' Leaves only one version for each object. :param index: List-like index of the version. 0 == first; -1 == last ''' def prep(df): start = sorted(df._start.tolist())[index] return df[df._start == start] return pd.concat([prep(df) for _, df in self.groupby(self._oid)])
python
def one_version(self, index=0): ''' Leaves only one version for each object. :param index: List-like index of the version. 0 == first; -1 == last ''' def prep(df): start = sorted(df._start.tolist())[index] return df[df._start == start] return pd.concat([prep(df) for _, df in self.groupby(self._oid)])
[ "def", "one_version", "(", "self", ",", "index", "=", "0", ")", ":", "def", "prep", "(", "df", ")", ":", "start", "=", "sorted", "(", "df", ".", "_start", ".", "tolist", "(", ")", ")", "[", "index", "]", "return", "df", "[", "df", ".", "_start"...
Leaves only one version for each object. :param index: List-like index of the version. 0 == first; -1 == last
[ "Leaves", "only", "one", "version", "for", "each", "object", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L430-L440
kejbaly2/metrique
metrique/result.py
Result.started_after
def started_after(self, date): ''' Leaves only those objects whose first version started after the specified date. :param date: date string to use in calculation ''' dt = Timestamp(date) starts = self.groupby(self._oid).apply(lambda df: df._start.min()) oids = set(starts[starts > dt].index.tolist()) return self[self._oid.apply(lambda v: v in oids)]
python
def started_after(self, date): ''' Leaves only those objects whose first version started after the specified date. :param date: date string to use in calculation ''' dt = Timestamp(date) starts = self.groupby(self._oid).apply(lambda df: df._start.min()) oids = set(starts[starts > dt].index.tolist()) return self[self._oid.apply(lambda v: v in oids)]
[ "def", "started_after", "(", "self", ",", "date", ")", ":", "dt", "=", "Timestamp", "(", "date", ")", "starts", "=", "self", ".", "groupby", "(", "self", ".", "_oid", ")", ".", "apply", "(", "lambda", "df", ":", "df", ".", "_start", ".", "min", "...
Leaves only those objects whose first version started after the specified date. :param date: date string to use in calculation
[ "Leaves", "only", "those", "objects", "whose", "first", "version", "started", "after", "the", "specified", "date", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L455-L465
kejbaly2/metrique
metrique/result.py
Result.object_apply
def object_apply(self, function): ''' Groups by _oid, then applies the function to each group and finally concatenates the results. :param function: func that takes a DataFrame and returns a DataFrame ''' return pd.concat([function(df) for _, df in self.groupby(self._oid)])
python
def object_apply(self, function): ''' Groups by _oid, then applies the function to each group and finally concatenates the results. :param function: func that takes a DataFrame and returns a DataFrame ''' return pd.concat([function(df) for _, df in self.groupby(self._oid)])
[ "def", "object_apply", "(", "self", ",", "function", ")", ":", "return", "pd", ".", "concat", "(", "[", "function", "(", "df", ")", "for", "_", ",", "df", "in", "self", ".", "groupby", "(", "self", ".", "_oid", ")", "]", ")" ]
Groups by _oid, then applies the function to each group and finally concatenates the results. :param function: func that takes a DataFrame and returns a DataFrame
[ "Groups", "by", "_oid", "then", "applies", "the", "function", "to", "each", "group", "and", "finally", "concatenates", "the", "results", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L476-L483
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._has_x
def _has_x(self, kwargs): '''Returns True if x is explicitly defined in kwargs''' return (('x' in kwargs) or (self._element_x in kwargs) or (self._type == 3 and self._element_1mx in kwargs))
python
def _has_x(self, kwargs): '''Returns True if x is explicitly defined in kwargs''' return (('x' in kwargs) or (self._element_x in kwargs) or (self._type == 3 and self._element_1mx in kwargs))
[ "def", "_has_x", "(", "self", ",", "kwargs", ")", ":", "return", "(", "(", "'x'", "in", "kwargs", ")", "or", "(", "self", ".", "_element_x", "in", "kwargs", ")", "or", "(", "self", ".", "_type", "==", "3", "and", "self", ".", "_element_1mx", "in", ...
Returns True if x is explicitly defined in kwargs
[ "Returns", "True", "if", "x", "is", "explicitly", "defined", "in", "kwargs" ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L163-L166
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._get_x
def _get_x(self, kwargs): ''' Returns x if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'x' in kwargs: return round(float(kwargs['x']), 6) elif self._element_x in kwargs: return round(float(kwargs[self._element_x]), 6) elif self._type == 3 and self._element_1mx in kwargs: return round(1. - float(kwargs[self._element_1mx]), 6) else: raise TypeError()
python
def _get_x(self, kwargs): ''' Returns x if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'x' in kwargs: return round(float(kwargs['x']), 6) elif self._element_x in kwargs: return round(float(kwargs[self._element_x]), 6) elif self._type == 3 and self._element_1mx in kwargs: return round(1. - float(kwargs[self._element_1mx]), 6) else: raise TypeError()
[ "def", "_get_x", "(", "self", ",", "kwargs", ")", ":", "if", "'x'", "in", "kwargs", ":", "return", "round", "(", "float", "(", "kwargs", "[", "'x'", "]", ")", ",", "6", ")", "elif", "self", ".", "_element_x", "in", "kwargs", ":", "return", "round",...
Returns x if it is explicitly defined in kwargs. Otherwise, raises TypeError.
[ "Returns", "x", "if", "it", "is", "explicitly", "defined", "in", "kwargs", ".", "Otherwise", "raises", "TypeError", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L168-L180
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._has_y
def _has_y(self, kwargs): '''Returns True if y is explicitly defined in kwargs''' return (('y' in kwargs) or (self._element_y in kwargs) or (self._type == 3 and self._element_1my in kwargs))
python
def _has_y(self, kwargs): '''Returns True if y is explicitly defined in kwargs''' return (('y' in kwargs) or (self._element_y in kwargs) or (self._type == 3 and self._element_1my in kwargs))
[ "def", "_has_y", "(", "self", ",", "kwargs", ")", ":", "return", "(", "(", "'y'", "in", "kwargs", ")", "or", "(", "self", ".", "_element_y", "in", "kwargs", ")", "or", "(", "self", ".", "_type", "==", "3", "and", "self", ".", "_element_1my", "in", ...
Returns True if y is explicitly defined in kwargs
[ "Returns", "True", "if", "y", "is", "explicitly", "defined", "in", "kwargs" ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L182-L185
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._get_y
def _get_y(self, kwargs): ''' Returns y if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'y' in kwargs: return round(float(kwargs['y']), 6) elif self._element_y in kwargs: return round(float(kwargs[self._element_y]), 6) elif self._type == 3 and self._element_1my in kwargs: return round(1. - float(kwargs[self._element_1my]), 6) else: raise TypeError()
python
def _get_y(self, kwargs): ''' Returns y if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'y' in kwargs: return round(float(kwargs['y']), 6) elif self._element_y in kwargs: return round(float(kwargs[self._element_y]), 6) elif self._type == 3 and self._element_1my in kwargs: return round(1. - float(kwargs[self._element_1my]), 6) else: raise TypeError()
[ "def", "_get_y", "(", "self", ",", "kwargs", ")", ":", "if", "'y'", "in", "kwargs", ":", "return", "round", "(", "float", "(", "kwargs", "[", "'y'", "]", ")", ",", "6", ")", "elif", "self", ".", "_element_y", "in", "kwargs", ":", "return", "round",...
Returns y if it is explicitly defined in kwargs. Otherwise, raises TypeError.
[ "Returns", "y", "if", "it", "is", "explicitly", "defined", "in", "kwargs", ".", "Otherwise", "raises", "TypeError", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L187-L199
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._has_z
def _has_z(self, kwargs): ''' Returns True if type is 1 or 2 and z is explicitly defined in kwargs. ''' return ((self._type == 1 or self._type ==2) and (('z' in kwargs) or (self._element_z in kwargs)))
python
def _has_z(self, kwargs): ''' Returns True if type is 1 or 2 and z is explicitly defined in kwargs. ''' return ((self._type == 1 or self._type ==2) and (('z' in kwargs) or (self._element_z in kwargs)))
[ "def", "_has_z", "(", "self", ",", "kwargs", ")", ":", "return", "(", "(", "self", ".", "_type", "==", "1", "or", "self", ".", "_type", "==", "2", ")", "and", "(", "(", "'z'", "in", "kwargs", ")", "or", "(", "self", ".", "_element_z", "in", "kw...
Returns True if type is 1 or 2 and z is explicitly defined in kwargs.
[ "Returns", "True", "if", "type", "is", "1", "or", "2", "and", "z", "is", "explicitly", "defined", "in", "kwargs", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L201-L206
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_quaternary.py
IIIVZincBlendeQuaternary._get_z
def _get_z(self, kwargs): ''' Returns z if type is 1 or 2 and z is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if self._type == 1 or self._type == 2: if 'z' in kwargs: return round(float(kwargs['z']), 6) elif self._element_z in kwargs: return round(float(kwargs[self._element_z]), 6) raise TypeError()
python
def _get_z(self, kwargs): ''' Returns z if type is 1 or 2 and z is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if self._type == 1 or self._type == 2: if 'z' in kwargs: return round(float(kwargs['z']), 6) elif self._element_z in kwargs: return round(float(kwargs[self._element_z]), 6) raise TypeError()
[ "def", "_get_z", "(", "self", ",", "kwargs", ")", ":", "if", "self", ".", "_type", "==", "1", "or", "self", ".", "_type", "==", "2", ":", "if", "'z'", "in", "kwargs", ":", "return", "round", "(", "float", "(", "kwargs", "[", "'z'", "]", ")", ",...
Returns z if type is 1 or 2 and z is explicitly defined in kwargs. Otherwise, raises TypeError.
[ "Returns", "z", "if", "type", "is", "1", "or", "2", "and", "z", "is", "explicitly", "defined", "in", "kwargs", ".", "Otherwise", "raises", "TypeError", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_quaternary.py#L208-L218
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.exists
def exists(cls, query=None, path=None, **kwargs): """ Like __init__ but simply returns a boolean as to whether or not the object exists, rather than returning the whole object. NOTE: if you pass in a single argument to exists, this will match against ID_KEY. @param query: a dictionary specifying key-value pairs that the result must match. If query is None, use kwargs in it's place @param path: the path of the database to query, in the form "database.colletion"; pass None to use the value of the PATH property of the object @param **kwargs: used as query parameters if query is None @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these """ if query is None and len(kwargs) > 0: query = kwargs if query is None: return False return cls.db(path).find_one(query) is not None
python
def exists(cls, query=None, path=None, **kwargs): """ Like __init__ but simply returns a boolean as to whether or not the object exists, rather than returning the whole object. NOTE: if you pass in a single argument to exists, this will match against ID_KEY. @param query: a dictionary specifying key-value pairs that the result must match. If query is None, use kwargs in it's place @param path: the path of the database to query, in the form "database.colletion"; pass None to use the value of the PATH property of the object @param **kwargs: used as query parameters if query is None @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these """ if query is None and len(kwargs) > 0: query = kwargs if query is None: return False return cls.db(path).find_one(query) is not None
[ "def", "exists", "(", "cls", ",", "query", "=", "None", ",", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "query", "is", "None", "and", "len", "(", "kwargs", ")", ">", "0", ":", "query", "=", "kwargs", "if", "query", "is", "Non...
Like __init__ but simply returns a boolean as to whether or not the object exists, rather than returning the whole object. NOTE: if you pass in a single argument to exists, this will match against ID_KEY. @param query: a dictionary specifying key-value pairs that the result must match. If query is None, use kwargs in it's place @param path: the path of the database to query, in the form "database.colletion"; pass None to use the value of the PATH property of the object @param **kwargs: used as query parameters if query is None @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these
[ "Like", "__init__", "but", "simply", "returns", "a", "boolean", "as", "to", "whether", "or", "not", "the", "object", "exists", "rather", "than", "returning", "the", "whole", "object", ".", "NOTE", ":", "if", "you", "pass", "in", "a", "single", "argument", ...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L146-L168
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.create
def create(cls, data, path=None, defaults=None, overwrite=False, random_id=False, **kwargs): """ Creates a new database object and stores it in the database NOTE: The path and defaults parameters to this function are to allow use of the DatabaseObject class directly. However, this class is intended for subclassing and children of it should override the PATH and DEFAULTS attributes rather than passing them as parameters here. @param data: dictionary of data that the object should be created with; this must follow all mongo rules, as well as have an entry for ID_KEY unless random_id == True @param path: the path of the database to use, in the form "database.collection" @param defaults: the defaults dictionary to use for this object @param overwrite: if set to true, will overwrite any object in the database with the same ID_KEY; if set to false will raise an exception if there is another object with the same ID_KEY @param random_id: stores the new object with a random value for ID_KEY; overwrites data[ID_KEY] @param **kwargs: ignored @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these @raise DatabaseConflictError: if there is already an object with that ID_KEY and overwrite == False @raise MalformedObjectError: if a REQUIRED key of defaults is missing, or if the ID_KEY of the object is None and random_id is False """ self = cls(path=path, defaults=defaults, _new_object=data) for key, value in self.items(): if key == ID_KEY: continue if self.DEFAULTS and key not in self.DEFAULTS: self._handle_non_default_key(key, value) self._check_type(key, value) if random_id and ID_KEY in self: dict.__delitem__(self, ID_KEY) if not random_id and ID_KEY not in self: raise MalformedObjectError("No " + ID_KEY + " key in item") if not random_id and not overwrite and self._collection.find_one({ID_KEY: data[ID_KEY]}): raise DatabaseConflictError('ID_KEY "%s" already exists in collection %s' % (data[ID_KEY], self.PATH)) 
self._pre_save() if ID_KEY in self and overwrite: self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self), upsert=True) else: insert_result = self._collection.insert_one(dict(self)) dict.__setitem__(self, ID_KEY, insert_result.inserted_id) return self
python
def create(cls, data, path=None, defaults=None, overwrite=False, random_id=False, **kwargs): """ Creates a new database object and stores it in the database NOTE: The path and defaults parameters to this function are to allow use of the DatabaseObject class directly. However, this class is intended for subclassing and children of it should override the PATH and DEFAULTS attributes rather than passing them as parameters here. @param data: dictionary of data that the object should be created with; this must follow all mongo rules, as well as have an entry for ID_KEY unless random_id == True @param path: the path of the database to use, in the form "database.collection" @param defaults: the defaults dictionary to use for this object @param overwrite: if set to true, will overwrite any object in the database with the same ID_KEY; if set to false will raise an exception if there is another object with the same ID_KEY @param random_id: stores the new object with a random value for ID_KEY; overwrites data[ID_KEY] @param **kwargs: ignored @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these @raise DatabaseConflictError: if there is already an object with that ID_KEY and overwrite == False @raise MalformedObjectError: if a REQUIRED key of defaults is missing, or if the ID_KEY of the object is None and random_id is False """ self = cls(path=path, defaults=defaults, _new_object=data) for key, value in self.items(): if key == ID_KEY: continue if self.DEFAULTS and key not in self.DEFAULTS: self._handle_non_default_key(key, value) self._check_type(key, value) if random_id and ID_KEY in self: dict.__delitem__(self, ID_KEY) if not random_id and ID_KEY not in self: raise MalformedObjectError("No " + ID_KEY + " key in item") if not random_id and not overwrite and self._collection.find_one({ID_KEY: data[ID_KEY]}): raise DatabaseConflictError('ID_KEY "%s" already exists in collection %s' % (data[ID_KEY], self.PATH)) 
self._pre_save() if ID_KEY in self and overwrite: self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self), upsert=True) else: insert_result = self._collection.insert_one(dict(self)) dict.__setitem__(self, ID_KEY, insert_result.inserted_id) return self
[ "def", "create", "(", "cls", ",", "data", ",", "path", "=", "None", ",", "defaults", "=", "None", ",", "overwrite", "=", "False", ",", "random_id", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", "=", "cls", "(", "path", "=", "path", ","...
Creates a new database object and stores it in the database NOTE: The path and defaults parameters to this function are to allow use of the DatabaseObject class directly. However, this class is intended for subclassing and children of it should override the PATH and DEFAULTS attributes rather than passing them as parameters here. @param data: dictionary of data that the object should be created with; this must follow all mongo rules, as well as have an entry for ID_KEY unless random_id == True @param path: the path of the database to use, in the form "database.collection" @param defaults: the defaults dictionary to use for this object @param overwrite: if set to true, will overwrite any object in the database with the same ID_KEY; if set to false will raise an exception if there is another object with the same ID_KEY @param random_id: stores the new object with a random value for ID_KEY; overwrites data[ID_KEY] @param **kwargs: ignored @raise Exception: if path and self.PATH are None; the database path must be defined in at least one of these @raise DatabaseConflictError: if there is already an object with that ID_KEY and overwrite == False @raise MalformedObjectError: if a REQUIRED key of defaults is missing, or if the ID_KEY of the object is None and random_id is False
[ "Creates", "a", "new", "database", "object", "and", "stores", "it", "in", "the", "database", "NOTE", ":", "The", "path", "and", "defaults", "parameters", "to", "this", "function", "are", "to", "allow", "use", "of", "the", "DatabaseObject", "class", "directly...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L171-L221
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.create_from_json
def create_from_json(cls, json_str, ignore_non_defaults=True): """ Creates a database object from a json object. The intent of this method is to allow creating a database object directly from json. Mongolia will also automatically convert any json values that are formatted using the MongoliaJSONEncoder (for ObjectIds and datetime objects) back to their native python data types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for creating the new object @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys of the create json that do not appear in DEFAULTS will also be excluded in creation """ create_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") # Remove all keys not in DEFAULTS if ignore_non_defaults is True if cls.DEFAULTS and ignore_non_defaults: for key in frozenset(create_dict).difference(frozenset(cls.DEFAULTS)): del create_dict[key] cls.create(create_dict, random_id=True)
python
def create_from_json(cls, json_str, ignore_non_defaults=True): """ Creates a database object from a json object. The intent of this method is to allow creating a database object directly from json. Mongolia will also automatically convert any json values that are formatted using the MongoliaJSONEncoder (for ObjectIds and datetime objects) back to their native python data types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for creating the new object @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys of the create json that do not appear in DEFAULTS will also be excluded in creation """ create_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") # Remove all keys not in DEFAULTS if ignore_non_defaults is True if cls.DEFAULTS and ignore_non_defaults: for key in frozenset(create_dict).difference(frozenset(cls.DEFAULTS)): del create_dict[key] cls.create(create_dict, random_id=True)
[ "def", "create_from_json", "(", "cls", ",", "json_str", ",", "ignore_non_defaults", "=", "True", ")", ":", "create_dict", "=", "json", ".", "loads", "(", "json_str", ",", "cls", "=", "MongoliaJSONDecoder", ",", "encoding", "=", "\"utf-8\"", ")", "# Remove all ...
Creates a database object from a json object. The intent of this method is to allow creating a database object directly from json. Mongolia will also automatically convert any json values that are formatted using the MongoliaJSONEncoder (for ObjectIds and datetime objects) back to their native python data types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for creating the new object @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys of the create json that do not appear in DEFAULTS will also be excluded in creation
[ "Creates", "a", "database", "object", "from", "a", "json", "object", ".", "The", "intent", "of", "this", "method", "is", "to", "allow", "creating", "a", "database", "object", "directly", "from", "json", ".", "Mongolia", "will", "also", "automatically", "conv...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L224-L250
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.db
def db(cls, path=None): """ Returns a pymongo Collection object from the current database connection. If the database connection is in test mode, collection will be in the test database. @param path: if is None, the PATH attribute of the current class is used; if is not None, this is used instead @raise Exception: if neither cls.PATH or path are valid """ if cls.PATH is None and path is None: raise Exception("No database specified") if path is None: path = cls.PATH if "." not in path: raise Exception(('invalid path "%s"; database paths must be ' + 'of the form "database.collection"') % (path,)) if CONNECTION.test_mode: return CONNECTION.get_connection()[TEST_DATABASE_NAME][path] (db, coll) = path.split('.', 1) return CONNECTION.get_connection()[db][coll]
python
def db(cls, path=None): """ Returns a pymongo Collection object from the current database connection. If the database connection is in test mode, collection will be in the test database. @param path: if is None, the PATH attribute of the current class is used; if is not None, this is used instead @raise Exception: if neither cls.PATH or path are valid """ if cls.PATH is None and path is None: raise Exception("No database specified") if path is None: path = cls.PATH if "." not in path: raise Exception(('invalid path "%s"; database paths must be ' + 'of the form "database.collection"') % (path,)) if CONNECTION.test_mode: return CONNECTION.get_connection()[TEST_DATABASE_NAME][path] (db, coll) = path.split('.', 1) return CONNECTION.get_connection()[db][coll]
[ "def", "db", "(", "cls", ",", "path", "=", "None", ")", ":", "if", "cls", ".", "PATH", "is", "None", "and", "path", "is", "None", ":", "raise", "Exception", "(", "\"No database specified\"", ")", "if", "path", "is", "None", ":", "path", "=", "cls", ...
Returns a pymongo Collection object from the current database connection. If the database connection is in test mode, collection will be in the test database. @param path: if is None, the PATH attribute of the current class is used; if is not None, this is used instead @raise Exception: if neither cls.PATH or path are valid
[ "Returns", "a", "pymongo", "Collection", "object", "from", "the", "current", "database", "connection", ".", "If", "the", "database", "connection", "is", "in", "test", "mode", "collection", "will", "be", "in", "the", "test", "database", "." ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L253-L274
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.save
def save(self): """ Saves the current state of the DatabaseObject to the database. Fills in missing values from defaults before saving. NOTE: The actual operation here is to overwrite the entry in the database with the same ID_KEY. WARNING: While the save operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. The update method is better from a concurrency perspective. @raise MalformedObjectError: if the object does not provide a value for a REQUIRED default """ self._pre_save() self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self))
python
def save(self): """ Saves the current state of the DatabaseObject to the database. Fills in missing values from defaults before saving. NOTE: The actual operation here is to overwrite the entry in the database with the same ID_KEY. WARNING: While the save operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. The update method is better from a concurrency perspective. @raise MalformedObjectError: if the object does not provide a value for a REQUIRED default """ self._pre_save() self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self))
[ "def", "save", "(", "self", ")", ":", "self", ".", "_pre_save", "(", ")", "self", ".", "_collection", ".", "replace_one", "(", "{", "ID_KEY", ":", "self", "[", "ID_KEY", "]", "}", ",", "dict", "(", "self", ")", ")" ]
Saves the current state of the DatabaseObject to the database. Fills in missing values from defaults before saving. NOTE: The actual operation here is to overwrite the entry in the database with the same ID_KEY. WARNING: While the save operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. The update method is better from a concurrency perspective. @raise MalformedObjectError: if the object does not provide a value for a REQUIRED default
[ "Saves", "the", "current", "state", "of", "the", "DatabaseObject", "to", "the", "database", ".", "Fills", "in", "missing", "values", "from", "defaults", "before", "saving", ".", "NOTE", ":", "The", "actual", "operation", "here", "is", "to", "overwrite", "the...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L343-L361
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.rename
def rename(self, new_id): """ Renames the DatabaseObject to have ID_KEY new_id. This is the only way allowed by DatabaseObject to change the ID_KEY of an object. Trying to modify ID_KEY in the dictionary will raise an exception. @param new_id: the new value for ID_KEY NOTE: This is actually a create and delete. WARNING: If the system fails during a rename, data may be duplicated. """ old_id = dict.__getitem__(self, ID_KEY) dict.__setitem__(self, ID_KEY, new_id) self._collection.save(self) self._collection.remove({ID_KEY: old_id})
python
def rename(self, new_id): """ Renames the DatabaseObject to have ID_KEY new_id. This is the only way allowed by DatabaseObject to change the ID_KEY of an object. Trying to modify ID_KEY in the dictionary will raise an exception. @param new_id: the new value for ID_KEY NOTE: This is actually a create and delete. WARNING: If the system fails during a rename, data may be duplicated. """ old_id = dict.__getitem__(self, ID_KEY) dict.__setitem__(self, ID_KEY, new_id) self._collection.save(self) self._collection.remove({ID_KEY: old_id})
[ "def", "rename", "(", "self", ",", "new_id", ")", ":", "old_id", "=", "dict", ".", "__getitem__", "(", "self", ",", "ID_KEY", ")", "dict", ".", "__setitem__", "(", "self", ",", "ID_KEY", ",", "new_id", ")", "self", ".", "_collection", ".", "save", "(...
Renames the DatabaseObject to have ID_KEY new_id. This is the only way allowed by DatabaseObject to change the ID_KEY of an object. Trying to modify ID_KEY in the dictionary will raise an exception. @param new_id: the new value for ID_KEY NOTE: This is actually a create and delete. WARNING: If the system fails during a rename, data may be duplicated.
[ "Renames", "the", "DatabaseObject", "to", "have", "ID_KEY", "new_id", ".", "This", "is", "the", "only", "way", "allowed", "by", "DatabaseObject", "to", "change", "the", "ID_KEY", "of", "an", "object", ".", "Trying", "to", "modify", "ID_KEY", "in", "the", "...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L363-L378
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.remove
def remove(self): """ Deletes the object from the database WARNING: This cannot be undone. Be really careful when deleting programatically. It is recommended to backup your database before applying specific deletes. If your application uses deletes regularly, it is strongly recommended that you have a recurring backup system. """ self._collection.remove({ID_KEY: self[ID_KEY]}) dict.clear(self)
python
def remove(self): """ Deletes the object from the database WARNING: This cannot be undone. Be really careful when deleting programatically. It is recommended to backup your database before applying specific deletes. If your application uses deletes regularly, it is strongly recommended that you have a recurring backup system. """ self._collection.remove({ID_KEY: self[ID_KEY]}) dict.clear(self)
[ "def", "remove", "(", "self", ")", ":", "self", ".", "_collection", ".", "remove", "(", "{", "ID_KEY", ":", "self", "[", "ID_KEY", "]", "}", ")", "dict", ".", "clear", "(", "self", ")" ]
Deletes the object from the database WARNING: This cannot be undone. Be really careful when deleting programatically. It is recommended to backup your database before applying specific deletes. If your application uses deletes regularly, it is strongly recommended that you have a recurring backup system.
[ "Deletes", "the", "object", "from", "the", "database", "WARNING", ":", "This", "cannot", "be", "undone", ".", "Be", "really", "careful", "when", "deleting", "programatically", ".", "It", "is", "recommended", "to", "backup", "your", "database", "before", "apply...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L380-L390
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.copy
def copy(self, new_id=None, attribute_overrides={}): """ Copies the DatabaseObject under the ID_KEY new_id. @param new_id: the value for ID_KEY of the copy; if this is none, creates the new object with a random ID_KEY @param attribute_overrides: dictionary of attribute names -> values that you would like to override with. """ data = dict(self) data.update(attribute_overrides) if new_id is not None: data[ID_KEY] = new_id return self.create(data, path=self.PATH) else: del data[ID_KEY] return self.create(data, random_id=True, path=self.PATH)
python
def copy(self, new_id=None, attribute_overrides={}): """ Copies the DatabaseObject under the ID_KEY new_id. @param new_id: the value for ID_KEY of the copy; if this is none, creates the new object with a random ID_KEY @param attribute_overrides: dictionary of attribute names -> values that you would like to override with. """ data = dict(self) data.update(attribute_overrides) if new_id is not None: data[ID_KEY] = new_id return self.create(data, path=self.PATH) else: del data[ID_KEY] return self.create(data, random_id=True, path=self.PATH)
[ "def", "copy", "(", "self", ",", "new_id", "=", "None", ",", "attribute_overrides", "=", "{", "}", ")", ":", "data", "=", "dict", "(", "self", ")", "data", ".", "update", "(", "attribute_overrides", ")", "if", "new_id", "is", "not", "None", ":", "dat...
Copies the DatabaseObject under the ID_KEY new_id. @param new_id: the value for ID_KEY of the copy; if this is none, creates the new object with a random ID_KEY @param attribute_overrides: dictionary of attribute names -> values that you would like to override with.
[ "Copies", "the", "DatabaseObject", "under", "the", "ID_KEY", "new_id", "." ]
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L392-L407
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.update
def update(self, update_dict=None, raw=False, **kwargs): """ Applies updates both to the database object and to the database via the mongo update method with the $set argument. Use the `raw` keyword to perform an arbitrary mongo update query. WARNING: Raw updates do not perform type checking. WARNING: While the update operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. While this is safer from a concurrency perspective than the access pattern load -> modify -> save as it only updates keys specified in the update_dict, it will still overwrite updates to those same keys that were made while the object was held in memory. @param update_dict: dictionary of updates to apply @param raw: if set to True, uses the contents of update_dict directly to perform the update rather than wrapping them in $set. @param **kwargs: used as update_dict if no update_dict is None """ if update_dict is None: update_dict = kwargs if raw: self._collection.update_one({ID_KEY: self[ID_KEY]}, update_dict) new_data = self._collection.find_one({ID_KEY: self[ID_KEY]}) dict.clear(self) dict.update(self, new_data) else: for key, value in update_dict.items(): self._check_type(key, value) dict.update(self, update_dict) self._collection.update_one({ID_KEY: self[ID_KEY]}, {SET: update_dict})
python
def update(self, update_dict=None, raw=False, **kwargs): """ Applies updates both to the database object and to the database via the mongo update method with the $set argument. Use the `raw` keyword to perform an arbitrary mongo update query. WARNING: Raw updates do not perform type checking. WARNING: While the update operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. While this is safer from a concurrency perspective than the access pattern load -> modify -> save as it only updates keys specified in the update_dict, it will still overwrite updates to those same keys that were made while the object was held in memory. @param update_dict: dictionary of updates to apply @param raw: if set to True, uses the contents of update_dict directly to perform the update rather than wrapping them in $set. @param **kwargs: used as update_dict if no update_dict is None """ if update_dict is None: update_dict = kwargs if raw: self._collection.update_one({ID_KEY: self[ID_KEY]}, update_dict) new_data = self._collection.find_one({ID_KEY: self[ID_KEY]}) dict.clear(self) dict.update(self, new_data) else: for key, value in update_dict.items(): self._check_type(key, value) dict.update(self, update_dict) self._collection.update_one({ID_KEY: self[ID_KEY]}, {SET: update_dict})
[ "def", "update", "(", "self", ",", "update_dict", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "update_dict", "is", "None", ":", "update_dict", "=", "kwargs", "if", "raw", ":", "self", ".", "_collection", ".", "upda...
Applies updates both to the database object and to the database via the mongo update method with the $set argument. Use the `raw` keyword to perform an arbitrary mongo update query. WARNING: Raw updates do not perform type checking. WARNING: While the update operation itself is atomic, it is not atomic with loads and modifications to the object. You must provide your own synchronization if you have multiple threads or processes possibly modifying the same database object. While this is safer from a concurrency perspective than the access pattern load -> modify -> save as it only updates keys specified in the update_dict, it will still overwrite updates to those same keys that were made while the object was held in memory. @param update_dict: dictionary of updates to apply @param raw: if set to True, uses the contents of update_dict directly to perform the update rather than wrapping them in $set. @param **kwargs: used as update_dict if no update_dict is None
[ "Applies", "updates", "both", "to", "the", "database", "object", "and", "to", "the", "database", "via", "the", "mongo", "update", "method", "with", "the", "$set", "argument", ".", "Use", "the", "raw", "keyword", "to", "perform", "an", "arbitrary", "mongo", ...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L409-L442
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.json_update
def json_update(self, json_str, exclude=[], ignore_non_defaults=True): """ Updates a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param exclude: a list of top-level keys to exclude from the update (ID_KEY need not be included in this list; it is automatically deleted since it can't be part of a mongo update operation) @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys in the update json that do not appear in DEFAULTS will also be excluded from the update """ update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") # Remove ID_KEY since it can't be part of a mongo update operation if ID_KEY in update_dict: del update_dict[ID_KEY] # Remove all keys in the exclude list from the update for key in frozenset(exclude).intersection(frozenset(update_dict)): del update_dict[key] # Remove all keys not in DEFAULTS if ignore_non_defaults is True if self.DEFAULTS and ignore_non_defaults: for key in frozenset(update_dict).difference(frozenset(self.DEFAULTS)): del update_dict[key] self.update(update_dict)
python
def json_update(self, json_str, exclude=[], ignore_non_defaults=True): """ Updates a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param exclude: a list of top-level keys to exclude from the update (ID_KEY need not be included in this list; it is automatically deleted since it can't be part of a mongo update operation) @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys in the update json that do not appear in DEFAULTS will also be excluded from the update """ update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") # Remove ID_KEY since it can't be part of a mongo update operation if ID_KEY in update_dict: del update_dict[ID_KEY] # Remove all keys in the exclude list from the update for key in frozenset(exclude).intersection(frozenset(update_dict)): del update_dict[key] # Remove all keys not in DEFAULTS if ignore_non_defaults is True if self.DEFAULTS and ignore_non_defaults: for key in frozenset(update_dict).difference(frozenset(self.DEFAULTS)): del update_dict[key] self.update(update_dict)
[ "def", "json_update", "(", "self", ",", "json_str", ",", "exclude", "=", "[", "]", ",", "ignore_non_defaults", "=", "True", ")", ":", "update_dict", "=", "json", ".", "loads", "(", "json_str", ",", "cls", "=", "MongoliaJSONDecoder", ",", "encoding", "=", ...
Updates a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param exclude: a list of top-level keys to exclude from the update (ID_KEY need not be included in this list; it is automatically deleted since it can't be part of a mongo update operation) @param ignore_non_defaults: if this is True and the database object has non-empty DEFAULTS, then any top-level keys in the update json that do not appear in DEFAULTS will also be excluded from the update
[ "Updates", "a", "database", "object", "based", "on", "a", "json", "object", ".", "The", "intent", "of", "this", "method", "is", "to", "allow", "passing", "json", "to", "an", "interface", "which", "then", "subsequently", "manipulates", "the", "object", "and",...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L454-L492
zagaran/mongolia
mongolia/database_object.py
DatabaseObject.json_update_fields
def json_update_fields(self, json_str, fields_to_update): """ Updates the specified fields of a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update for specific fields of the object. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param fields_to_update: a list of the top-level keys to update; only keys included in this list will be update. Do not include ID_KEY in this list since it can't be part of a mongo update operation """ update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") update_dict = dict((k, v) for k, v in update_dict.items() if k in fields_to_update and k != ID_KEY) self.update(update_dict)
python
def json_update_fields(self, json_str, fields_to_update): """ Updates the specified fields of a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update for specific fields of the object. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param fields_to_update: a list of the top-level keys to update; only keys included in this list will be update. Do not include ID_KEY in this list since it can't be part of a mongo update operation """ update_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8") update_dict = dict((k, v) for k, v in update_dict.items() if k in fields_to_update and k != ID_KEY) self.update(update_dict)
[ "def", "json_update_fields", "(", "self", ",", "json_str", ",", "fields_to_update", ")", ":", "update_dict", "=", "json", ".", "loads", "(", "json_str", ",", "cls", "=", "MongoliaJSONDecoder", ",", "encoding", "=", "\"utf-8\"", ")", "update_dict", "=", "dict",...
Updates the specified fields of a database object based on a json object. The intent of this method is to allow passing json to an interface which then subsequently manipulates the object and then sends back an update for specific fields of the object. Mongolia will also automatically convert any json values that were initially converted from ObjectId and datetime.datetime objects back to their native python object types. Note: if using AngularJS, make sure to pass json back using `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular sometimes adds `$$hashkey` to javascript objects and this will cause a mongo error due to the "$" prefix in keys. @param json_str: the json string containing the new object to use for the update @param fields_to_update: a list of the top-level keys to update; only keys included in this list will be update. Do not include ID_KEY in this list since it can't be part of a mongo update operation
[ "Updates", "the", "specified", "fields", "of", "a", "database", "object", "based", "on", "a", "json", "object", ".", "The", "intent", "of", "this", "method", "is", "to", "allow", "passing", "json", "to", "an", "interface", "which", "then", "subsequently", ...
train
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/database_object.py#L494-L519
biocore/burrito-fillings
bfillings/raxml_v730.py
raxml_alignment
def raxml_alignment(align_obj, raxml_model="GTRCAT", params={}, SuppressStderr=True, SuppressStdout=True): """Run raxml on alignment object align_obj: Alignment object params: you can set any params except -w and -n returns: tuple (phylonode, parsimonyphylonode, log likelihood, total exec time) """ # generate temp filename for output params["-w"] = "/tmp/" params["-n"] = get_tmp_filename().split("/")[-1] params["-m"] = raxml_model params["-p"] = randint(1,100000) ih = '_input_as_multiline_string' seqs, align_map = align_obj.toPhylip() #print params["-n"] # set up command raxml_app = Raxml( params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) # run raxml ra = raxml_app(seqs) # generate tree tree_node = DndParser(ra["Result"]) # generate parsimony tree parsimony_tree_node = DndParser(ra["ParsimonyTree"]) # extract log likelihood from log file log_file = ra["Log"] total_exec_time = exec_time = log_likelihood = 0.0 for line in log_file: exec_time, log_likelihood = map(float, line.split()) total_exec_time += exec_time # remove output files ra.cleanUp() return tree_node, parsimony_tree_node, log_likelihood, total_exec_time
python
def raxml_alignment(align_obj, raxml_model="GTRCAT", params={}, SuppressStderr=True, SuppressStdout=True): """Run raxml on alignment object align_obj: Alignment object params: you can set any params except -w and -n returns: tuple (phylonode, parsimonyphylonode, log likelihood, total exec time) """ # generate temp filename for output params["-w"] = "/tmp/" params["-n"] = get_tmp_filename().split("/")[-1] params["-m"] = raxml_model params["-p"] = randint(1,100000) ih = '_input_as_multiline_string' seqs, align_map = align_obj.toPhylip() #print params["-n"] # set up command raxml_app = Raxml( params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) # run raxml ra = raxml_app(seqs) # generate tree tree_node = DndParser(ra["Result"]) # generate parsimony tree parsimony_tree_node = DndParser(ra["ParsimonyTree"]) # extract log likelihood from log file log_file = ra["Log"] total_exec_time = exec_time = log_likelihood = 0.0 for line in log_file: exec_time, log_likelihood = map(float, line.split()) total_exec_time += exec_time # remove output files ra.cleanUp() return tree_node, parsimony_tree_node, log_likelihood, total_exec_time
[ "def", "raxml_alignment", "(", "align_obj", ",", "raxml_model", "=", "\"GTRCAT\"", ",", "params", "=", "{", "}", ",", "SuppressStderr", "=", "True", ",", "SuppressStdout", "=", "True", ")", ":", "# generate temp filename for output", "params", "[", "\"-w\"", "]"...
Run raxml on alignment object align_obj: Alignment object params: you can set any params except -w and -n returns: tuple (phylonode, parsimonyphylonode, log likelihood, total exec time)
[ "Run", "raxml", "on", "alignment", "object" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L712-L765
biocore/burrito-fillings
bfillings/raxml_v730.py
build_tree_from_alignment
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params={}): """Returns a tree from Alignment object aln. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: best_tree suppport is currently not implemented params: dict of parameters to pass in to the RAxML app controller. The result will be an xxx.Alignment object, or None if tree fails. """ if best_tree: raise NotImplementedError if '-m' not in params: if moltype == DNA or moltype == RNA: #params["-m"] = 'GTRMIX' # in version 7.2.3, GTRMIX is no longer supported but says GTRCAT # behaves like GTRMIX (http://www.phylo.org/tools/raxmlhpc2.html) params["-m"] = 'GTRGAMMA' elif moltype == PROTEIN: params["-m"] = 'PROTGAMMAmatrixName' else: raise ValueError, "Moltype must be either DNA, RNA, or PROTEIN" if not hasattr(aln, 'toPhylip'): aln = Alignment(aln) seqs, align_map = aln.toPhylip() # generate temp filename for output params["-w"] = "/tmp/" params["-n"] = get_tmp_filename().split("/")[-1] params["-k"] = True params["-p"] = randint(1,100000) params["-x"] = randint(1,100000) ih = '_input_as_multiline_string' raxml_app = Raxml(params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=True, SuppressStdout=True) raxml_result = raxml_app(seqs) tree = DndParser(raxml_result['Bootstrap'], constructor=PhyloNode) for node in tree.tips(): node.Name = align_map[node.Name] raxml_result.cleanUp() return tree
python
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params={}): """Returns a tree from Alignment object aln. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: best_tree suppport is currently not implemented params: dict of parameters to pass in to the RAxML app controller. The result will be an xxx.Alignment object, or None if tree fails. """ if best_tree: raise NotImplementedError if '-m' not in params: if moltype == DNA or moltype == RNA: #params["-m"] = 'GTRMIX' # in version 7.2.3, GTRMIX is no longer supported but says GTRCAT # behaves like GTRMIX (http://www.phylo.org/tools/raxmlhpc2.html) params["-m"] = 'GTRGAMMA' elif moltype == PROTEIN: params["-m"] = 'PROTGAMMAmatrixName' else: raise ValueError, "Moltype must be either DNA, RNA, or PROTEIN" if not hasattr(aln, 'toPhylip'): aln = Alignment(aln) seqs, align_map = aln.toPhylip() # generate temp filename for output params["-w"] = "/tmp/" params["-n"] = get_tmp_filename().split("/")[-1] params["-k"] = True params["-p"] = randint(1,100000) params["-x"] = randint(1,100000) ih = '_input_as_multiline_string' raxml_app = Raxml(params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=True, SuppressStdout=True) raxml_result = raxml_app(seqs) tree = DndParser(raxml_result['Bootstrap'], constructor=PhyloNode) for node in tree.tips(): node.Name = align_map[node.Name] raxml_result.cleanUp() return tree
[ "def", "build_tree_from_alignment", "(", "aln", ",", "moltype", "=", "DNA", ",", "best_tree", "=", "False", ",", "params", "=", "{", "}", ")", ":", "if", "best_tree", ":", "raise", "NotImplementedError", "if", "'-m'", "not", "in", "params", ":", "if", "m...
Returns a tree from Alignment object aln. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: best_tree suppport is currently not implemented params: dict of parameters to pass in to the RAxML app controller. The result will be an xxx.Alignment object, or None if tree fails.
[ "Returns", "a", "tree", "from", "Alignment", "object", "aln", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L767-L822
biocore/burrito-fillings
bfillings/raxml_v730.py
insert_sequences_into_tree
def insert_sequences_into_tree(seqs, moltype, params={}, write_log=True): """Insert sequences into Tree. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object params: dict of parameters to pass in to the RAxML app controller. The result will be a tree. """ ih = '_input_as_multiline_string' raxml_app = Raxml(params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=False, SuppressStdout=False, HALT_EXEC=False) raxml_result = raxml_app(seqs) # write a log file if write_log: log_fp = join(params["-w"],'log_raxml_'+split(get_tmp_filename())[-1]) log_file=open(log_fp,'w') log_file.write(raxml_result['StdOut'].read()) log_file.close() ''' # getting setup since parsimony doesn't output tree..only jplace, however # it is currently corrupt # use guppy to convert json file into a placement tree guppy_params={'tog':None} new_tree=build_tree_from_json_using_params(raxml_result['json'].name, \ output_dir=params["-w"], \ params=guppy_params) ''' # get tree from 'Result Names' new_tree=raxml_result['Result'].readlines() filtered_tree=re.sub('\[I\d+\]','',str(new_tree)) tree = DndParser(filtered_tree, constructor=PhyloNode) raxml_result.cleanUp() return tree
python
def insert_sequences_into_tree(seqs, moltype, params={}, write_log=True): """Insert sequences into Tree. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object params: dict of parameters to pass in to the RAxML app controller. The result will be a tree. """ ih = '_input_as_multiline_string' raxml_app = Raxml(params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=False, SuppressStdout=False, HALT_EXEC=False) raxml_result = raxml_app(seqs) # write a log file if write_log: log_fp = join(params["-w"],'log_raxml_'+split(get_tmp_filename())[-1]) log_file=open(log_fp,'w') log_file.write(raxml_result['StdOut'].read()) log_file.close() ''' # getting setup since parsimony doesn't output tree..only jplace, however # it is currently corrupt # use guppy to convert json file into a placement tree guppy_params={'tog':None} new_tree=build_tree_from_json_using_params(raxml_result['json'].name, \ output_dir=params["-w"], \ params=guppy_params) ''' # get tree from 'Result Names' new_tree=raxml_result['Result'].readlines() filtered_tree=re.sub('\[I\d+\]','',str(new_tree)) tree = DndParser(filtered_tree, constructor=PhyloNode) raxml_result.cleanUp() return tree
[ "def", "insert_sequences_into_tree", "(", "seqs", ",", "moltype", ",", "params", "=", "{", "}", ",", "write_log", "=", "True", ")", ":", "ih", "=", "'_input_as_multiline_string'", "raxml_app", "=", "Raxml", "(", "params", "=", "params", ",", "InputHandler", ...
Insert sequences into Tree. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object params: dict of parameters to pass in to the RAxML app controller. The result will be a tree.
[ "Insert", "sequences", "into", "Tree", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L825-L875
biocore/burrito-fillings
bfillings/raxml_v730.py
Raxml._format_output
def _format_output(self, outfile_name, out_type): """ Prepend proper output prefix to output filename """ outfile_name = self._absolute(outfile_name) outparts = outfile_name.split("/") outparts[-1] = self._out_format % (out_type, outparts[-1] ) return '/'.join(outparts)
python
def _format_output(self, outfile_name, out_type): """ Prepend proper output prefix to output filename """ outfile_name = self._absolute(outfile_name) outparts = outfile_name.split("/") outparts[-1] = self._out_format % (out_type, outparts[-1] ) return '/'.join(outparts)
[ "def", "_format_output", "(", "self", ",", "outfile_name", ",", "out_type", ")", ":", "outfile_name", "=", "self", ".", "_absolute", "(", "outfile_name", ")", "outparts", "=", "outfile_name", ".", "split", "(", "\"/\"", ")", "outparts", "[", "-", "1", "]",...
Prepend proper output prefix to output filename
[ "Prepend", "proper", "output", "prefix", "to", "output", "filename" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L468-L475
biocore/burrito-fillings
bfillings/raxml_v730.py
Raxml._checkpoint_out_filenames
def _checkpoint_out_filenames(self): """ RAxML generates a crapload of checkpoint files so need to walk directory to collect names of all of them. """ out_filenames = [] if self.Parameters['-n'].isOn(): out_name = str(self.Parameters['-n'].Value) walk_root = self.WorkingDir if self.Parameters['-w'].isOn(): walk_root = str(self.Parameters['-w'].Value) for tup in walk(walk_root): dpath, dnames, dfiles = tup if dpath == walk_root: for gen_file in dfiles: if out_name in gen_file and "checkpoint" in gen_file: out_filenames.append(walk_root + gen_file) break else: raise ValueError, "No output file specified." return out_filenames
python
def _checkpoint_out_filenames(self): """ RAxML generates a crapload of checkpoint files so need to walk directory to collect names of all of them. """ out_filenames = [] if self.Parameters['-n'].isOn(): out_name = str(self.Parameters['-n'].Value) walk_root = self.WorkingDir if self.Parameters['-w'].isOn(): walk_root = str(self.Parameters['-w'].Value) for tup in walk(walk_root): dpath, dnames, dfiles = tup if dpath == walk_root: for gen_file in dfiles: if out_name in gen_file and "checkpoint" in gen_file: out_filenames.append(walk_root + gen_file) break else: raise ValueError, "No output file specified." return out_filenames
[ "def", "_checkpoint_out_filenames", "(", "self", ")", ":", "out_filenames", "=", "[", "]", "if", "self", ".", "Parameters", "[", "'-n'", "]", ".", "isOn", "(", ")", ":", "out_name", "=", "str", "(", "self", ".", "Parameters", "[", "'-n'", "]", ".", "...
RAxML generates a crapload of checkpoint files so need to walk directory to collect names of all of them.
[ "RAxML", "generates", "a", "crapload", "of", "checkpoint", "files", "so", "need", "to", "walk", "directory", "to", "collect", "names", "of", "all", "of", "them", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L613-L634
biocore/burrito-fillings
bfillings/raxml_v730.py
Raxml._handle_app_result_build_failure
def _handle_app_result_build_failure(self,out,err,exit_status,result_paths): """ Catch the error when files are not produced """ try: raise ApplicationError, \ 'RAxML failed to produce an output file due to the following error: \n\n%s ' \ % err.read() except: raise ApplicationError,\ 'RAxML failed to run properly.'
python
def _handle_app_result_build_failure(self,out,err,exit_status,result_paths): """ Catch the error when files are not produced """ try: raise ApplicationError, \ 'RAxML failed to produce an output file due to the following error: \n\n%s ' \ % err.read() except: raise ApplicationError,\ 'RAxML failed to run properly.'
[ "def", "_handle_app_result_build_failure", "(", "self", ",", "out", ",", "err", ",", "exit_status", ",", "result_paths", ")", ":", "try", ":", "raise", "ApplicationError", ",", "'RAxML failed to produce an output file due to the following error: \\n\\n%s '", "%", "err", "...
Catch the error when files are not produced
[ "Catch", "the", "error", "when", "files", "are", "not", "produced" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/raxml_v730.py#L636-L645
kushaldas/retask
retask/queue.py
Queue.names
def names(self): """ Returns a list of queues available, ``None`` if no such queues found. Remember this will only shows queues with at least one item enqueued. """ data = None if not self.connected: raise ConnectionError('Queue is not connected') try: data = self.rdb.keys("retaskqueue-*") except redis.exceptions.ConnectionError as err: raise ConnectionError(str(err)) return [name[12:] for name in data]
python
def names(self): """ Returns a list of queues available, ``None`` if no such queues found. Remember this will only shows queues with at least one item enqueued. """ data = None if not self.connected: raise ConnectionError('Queue is not connected') try: data = self.rdb.keys("retaskqueue-*") except redis.exceptions.ConnectionError as err: raise ConnectionError(str(err)) return [name[12:] for name in data]
[ "def", "names", "(", "self", ")", ":", "data", "=", "None", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "try", ":", "data", "=", "self", ".", "rdb", ".", "keys", "(", "\"retaskqueue-*\"", ...
Returns a list of queues available, ``None`` if no such queues found. Remember this will only shows queues with at least one item enqueued.
[ "Returns", "a", "list", "of", "queues", "available", "None", "if", "no", "such", "queues", "found", ".", "Remember", "this", "will", "only", "shows", "queues", "with", "at", "least", "one", "item", "enqueued", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L61-L76
kushaldas/retask
retask/queue.py
Queue.length
def length(self): """ Gives the length of the queue. Returns ``None`` if the queue is not connected. If the queue is not connected then it will raise :class:`retask.ConnectionError`. """ if not self.connected: raise ConnectionError('Queue is not connected') try: length = self.rdb.llen(self._name) except redis.exceptions.ConnectionError as err: raise ConnectionError(str(err)) return length
python
def length(self): """ Gives the length of the queue. Returns ``None`` if the queue is not connected. If the queue is not connected then it will raise :class:`retask.ConnectionError`. """ if not self.connected: raise ConnectionError('Queue is not connected') try: length = self.rdb.llen(self._name) except redis.exceptions.ConnectionError as err: raise ConnectionError(str(err)) return length
[ "def", "length", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "try", ":", "length", "=", "self", ".", "rdb", ".", "llen", "(", "self", ".", "_name", ")", "except", ...
Gives the length of the queue. Returns ``None`` if the queue is not connected. If the queue is not connected then it will raise :class:`retask.ConnectionError`.
[ "Gives", "the", "length", "of", "the", "queue", ".", "Returns", "None", "if", "the", "queue", "is", "not", "connected", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L79-L96
kushaldas/retask
retask/queue.py
Queue.connect
def connect(self): """ Creates the connection with the redis server. Return ``True`` if the connection works, else returns ``False``. It does not take any arguments. :return: ``Boolean`` value .. note:: After creating the ``Queue`` object the user should call the ``connect`` method to create the connection. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True """ config = self.config self.rdb = redis.Redis(config['host'], config['port'], config['db'],\ config['password']) try: info = self.rdb.info() self.connected = True except redis.ConnectionError: return False return True
python
def connect(self): """ Creates the connection with the redis server. Return ``True`` if the connection works, else returns ``False``. It does not take any arguments. :return: ``Boolean`` value .. note:: After creating the ``Queue`` object the user should call the ``connect`` method to create the connection. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True """ config = self.config self.rdb = redis.Redis(config['host'], config['port'], config['db'],\ config['password']) try: info = self.rdb.info() self.connected = True except redis.ConnectionError: return False return True
[ "def", "connect", "(", "self", ")", ":", "config", "=", "self", ".", "config", "self", ".", "rdb", "=", "redis", ".", "Redis", "(", "config", "[", "'host'", "]", ",", "config", "[", "'port'", "]", ",", "config", "[", "'db'", "]", ",", "config", "...
Creates the connection with the redis server. Return ``True`` if the connection works, else returns ``False``. It does not take any arguments. :return: ``Boolean`` value .. note:: After creating the ``Queue`` object the user should call the ``connect`` method to create the connection. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True
[ "Creates", "the", "connection", "with", "the", "redis", "server", ".", "Return", "True", "if", "the", "connection", "works", "else", "returns", "False", ".", "It", "does", "not", "take", "any", "arguments", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L98-L128
kushaldas/retask
retask/queue.py
Queue.wait
def wait(self, wait_time=0): """ Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts. :arg wait_time: Time in seconds to wait, default is infinite. :return: :class:`~retask.task.Task` object from the queue or False if it timeouts. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> task = q.wait() >>> print task.data {u'name': u'kushal'} .. note:: This is a blocking call, you can specity wait_time argument for timeout. """ if not self.connected: raise ConnectionError('Queue is not connected') data = self.rdb.brpop(self._name, wait_time) if data: task = Task() task.__dict__ = json.loads(data[1]) return task else: return False
python
def wait(self, wait_time=0): """ Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts. :arg wait_time: Time in seconds to wait, default is infinite. :return: :class:`~retask.task.Task` object from the queue or False if it timeouts. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> task = q.wait() >>> print task.data {u'name': u'kushal'} .. note:: This is a blocking call, you can specity wait_time argument for timeout. """ if not self.connected: raise ConnectionError('Queue is not connected') data = self.rdb.brpop(self._name, wait_time) if data: task = Task() task.__dict__ = json.loads(data[1]) return task else: return False
[ "def", "wait", "(", "self", ",", "wait_time", "=", "0", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "data", "=", "self", ".", "rdb", ".", "brpop", "(", "self", ".", "_name", ",...
Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts. :arg wait_time: Time in seconds to wait, default is infinite. :return: :class:`~retask.task.Task` object from the queue or False if it timeouts. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> task = q.wait() >>> print task.data {u'name': u'kushal'} .. note:: This is a blocking call, you can specity wait_time argument for timeout.
[ "Returns", "a", ":", "class", ":", "~retask", ".", "task", ".", "Task", "object", "from", "the", "queue", ".", "Returns", "False", "if", "it", "timeouts", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L130-L162
kushaldas/retask
retask/queue.py
Queue.dequeue
def dequeue(self): """ Returns a :class:`~retask.task.Task` object from the queue. Returns ``None`` if the queue is empty. :return: :class:`~retask.task.Task` object from the queue If the queue is not connected then it will raise :class:`retask.ConnectionError` .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> t = q.dequeue() >>> print t.data {u'name': u'kushal'} """ if not self.connected: raise ConnectionError('Queue is not connected') if self.rdb.llen(self._name) == 0: return None data = self.rdb.rpop(self._name) if not data: return None if isinstance(data, six.binary_type): data = six.text_type(data, 'utf-8', errors = 'replace') task = Task() task.__dict__ = json.loads(data) return task
python
def dequeue(self): """ Returns a :class:`~retask.task.Task` object from the queue. Returns ``None`` if the queue is empty. :return: :class:`~retask.task.Task` object from the queue If the queue is not connected then it will raise :class:`retask.ConnectionError` .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> t = q.dequeue() >>> print t.data {u'name': u'kushal'} """ if not self.connected: raise ConnectionError('Queue is not connected') if self.rdb.llen(self._name) == 0: return None data = self.rdb.rpop(self._name) if not data: return None if isinstance(data, six.binary_type): data = six.text_type(data, 'utf-8', errors = 'replace') task = Task() task.__dict__ = json.loads(data) return task
[ "def", "dequeue", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "if", "self", ".", "rdb", ".", "llen", "(", "self", ".", "_name", ")", "==", "0", ":", "return", "Non...
Returns a :class:`~retask.task.Task` object from the queue. Returns ``None`` if the queue is empty. :return: :class:`~retask.task.Task` object from the queue If the queue is not connected then it will raise :class:`retask.ConnectionError` .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> t = q.dequeue() >>> print t.data {u'name': u'kushal'}
[ "Returns", "a", ":", "class", ":", "~retask", ".", "task", ".", "Task", "object", "from", "the", "queue", ".", "Returns", "None", "if", "the", "queue", "is", "empty", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L164-L198
kushaldas/retask
retask/queue.py
Queue.enqueue
def enqueue(self, task): """ Enqueues the given :class:`~retask.task.Task` object to the queue and returns a :class:`~retask.queue.Job` object. :arg task: ::class:`~retask.task.Task` object :return: :class:`~retask.queue.Job` object If the queue is not connected then it will raise :class:`retask.ConnectionError`. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> from retask.task import Task >>> task = Task({'name':'kushal'}) >>> job = q.enqueue(task) """ if not self.connected: raise ConnectionError('Queue is not connected') try: #We can set the value to the queue job = Job(self.rdb) task.urn = job.urn text = json.dumps(task.__dict__) self.rdb.lpush(self._name, text) except Exception as err: return False return job
python
def enqueue(self, task): """ Enqueues the given :class:`~retask.task.Task` object to the queue and returns a :class:`~retask.queue.Job` object. :arg task: ::class:`~retask.task.Task` object :return: :class:`~retask.queue.Job` object If the queue is not connected then it will raise :class:`retask.ConnectionError`. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> from retask.task import Task >>> task = Task({'name':'kushal'}) >>> job = q.enqueue(task) """ if not self.connected: raise ConnectionError('Queue is not connected') try: #We can set the value to the queue job = Job(self.rdb) task.urn = job.urn text = json.dumps(task.__dict__) self.rdb.lpush(self._name, text) except Exception as err: return False return job
[ "def", "enqueue", "(", "self", ",", "task", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "try", ":", "#We can set the value to the queue", "job", "=", "Job", "(", "self", ".", "rdb", ...
Enqueues the given :class:`~retask.task.Task` object to the queue and returns a :class:`~retask.queue.Job` object. :arg task: ::class:`~retask.task.Task` object :return: :class:`~retask.queue.Job` object If the queue is not connected then it will raise :class:`retask.ConnectionError`. .. doctest:: >>> from retask import Queue >>> q = Queue('test') >>> q.connect() True >>> from retask.task import Task >>> task = Task({'name':'kushal'}) >>> job = q.enqueue(task)
[ "Enqueues", "the", "given", ":", "class", ":", "~retask", ".", "task", ".", "Task", "object", "to", "the", "queue", "and", "returns", "a", ":", "class", ":", "~retask", ".", "queue", ".", "Job", "object", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L200-L234
kushaldas/retask
retask/queue.py
Queue.send
def send(self, task, result, expire=60): """ Sends the result back to the producer. This should be called if only you want to return the result in async manner. :arg task: ::class:`~retask.task.Task` object :arg result: Result data to be send back. Should be in JSON serializable. :arg expire: Time in seconds after the key expires. Default is 60 seconds. """ self.rdb.lpush(task.urn, json.dumps(result)) self.rdb.expire(task.urn, expire)
python
def send(self, task, result, expire=60): """ Sends the result back to the producer. This should be called if only you want to return the result in async manner. :arg task: ::class:`~retask.task.Task` object :arg result: Result data to be send back. Should be in JSON serializable. :arg expire: Time in seconds after the key expires. Default is 60 seconds. """ self.rdb.lpush(task.urn, json.dumps(result)) self.rdb.expire(task.urn, expire)
[ "def", "send", "(", "self", ",", "task", ",", "result", ",", "expire", "=", "60", ")", ":", "self", ".", "rdb", ".", "lpush", "(", "task", ".", "urn", ",", "json", ".", "dumps", "(", "result", ")", ")", "self", ".", "rdb", ".", "expire", "(", ...
Sends the result back to the producer. This should be called if only you want to return the result in async manner. :arg task: ::class:`~retask.task.Task` object :arg result: Result data to be send back. Should be in JSON serializable. :arg expire: Time in seconds after the key expires. Default is 60 seconds.
[ "Sends", "the", "result", "back", "to", "the", "producer", ".", "This", "should", "be", "called", "if", "only", "you", "want", "to", "return", "the", "result", "in", "async", "manner", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L236-L246
kushaldas/retask
retask/queue.py
Queue.find
def find(self, obj): """Returns the index of the given object in the queue, it might be string which will be searched inside each task. :arg obj: object we are looking :return: -1 if the object is not found or else the location of the task """ if not self.connected: raise ConnectionError('Queue is not connected') data = self.rdb.lrange(self._name, 0, -1) for i, datum in enumerate(data): if datum.find(str(obj)) != -1: return i return -1
python
def find(self, obj): """Returns the index of the given object in the queue, it might be string which will be searched inside each task. :arg obj: object we are looking :return: -1 if the object is not found or else the location of the task """ if not self.connected: raise ConnectionError('Queue is not connected') data = self.rdb.lrange(self._name, 0, -1) for i, datum in enumerate(data): if datum.find(str(obj)) != -1: return i return -1
[ "def", "find", "(", "self", ",", "obj", ")", ":", "if", "not", "self", ".", "connected", ":", "raise", "ConnectionError", "(", "'Queue is not connected'", ")", "data", "=", "self", ".", "rdb", ".", "lrange", "(", "self", ".", "_name", ",", "0", ",", ...
Returns the index of the given object in the queue, it might be string which will be searched inside each task. :arg obj: object we are looking :return: -1 if the object is not found or else the location of the task
[ "Returns", "the", "index", "of", "the", "given", "object", "in", "the", "queue", "it", "might", "be", "string", "which", "will", "be", "searched", "inside", "each", "task", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L253-L268
kushaldas/retask
retask/queue.py
Job.result
def result(self): """ Returns the result from the worker for this job. This is used to pass result in async way. """ if self.__result: return self.__result data = self.rdb.rpop(self.urn) if data: self.rdb.delete(self.urn) data = json.loads(data) self.__result = data return data else: return None
python
def result(self): """ Returns the result from the worker for this job. This is used to pass result in async way. """ if self.__result: return self.__result data = self.rdb.rpop(self.urn) if data: self.rdb.delete(self.urn) data = json.loads(data) self.__result = data return data else: return None
[ "def", "result", "(", "self", ")", ":", "if", "self", ".", "__result", ":", "return", "self", ".", "__result", "data", "=", "self", ".", "rdb", ".", "rpop", "(", "self", ".", "urn", ")", "if", "data", ":", "self", ".", "rdb", ".", "delete", "(", ...
Returns the result from the worker for this job. This is used to pass result in async way.
[ "Returns", "the", "result", "from", "the", "worker", "for", "this", "job", ".", "This", "is", "used", "to", "pass", "result", "in", "async", "way", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L283-L297
kushaldas/retask
retask/queue.py
Job.wait
def wait(self, wait_time=0): """ Blocking call to check if the worker returns the result. One can use job.result after this call returns ``True``. :arg wait_time: Time in seconds to wait, default is infinite. :return: `True` or `False`. .. note:: This is a blocking call, you can specity wait_time argument for timeout. """ if self.__result: return True data = self.rdb.brpop(self.urn, wait_time) if data: self.rdb.delete(self.urn) data = json.loads(data[1]) self.__result = data return True else: return False
python
def wait(self, wait_time=0): """ Blocking call to check if the worker returns the result. One can use job.result after this call returns ``True``. :arg wait_time: Time in seconds to wait, default is infinite. :return: `True` or `False`. .. note:: This is a blocking call, you can specity wait_time argument for timeout. """ if self.__result: return True data = self.rdb.brpop(self.urn, wait_time) if data: self.rdb.delete(self.urn) data = json.loads(data[1]) self.__result = data return True else: return False
[ "def", "wait", "(", "self", ",", "wait_time", "=", "0", ")", ":", "if", "self", ".", "__result", ":", "return", "True", "data", "=", "self", ".", "rdb", ".", "brpop", "(", "self", ".", "urn", ",", "wait_time", ")", "if", "data", ":", "self", ".",...
Blocking call to check if the worker returns the result. One can use job.result after this call returns ``True``. :arg wait_time: Time in seconds to wait, default is infinite. :return: `True` or `False`. .. note:: This is a blocking call, you can specity wait_time argument for timeout.
[ "Blocking", "call", "to", "check", "if", "the", "worker", "returns", "the", "result", ".", "One", "can", "use", "job", ".", "result", "after", "this", "call", "returns", "True", "." ]
train
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L299-L322
matrix-org/pushbaby
pushbaby/pushconnection.py
PushConnection.messages_in_flight
def messages_in_flight(self): """ Returns True if there are messages waiting to be sent or that we're still waiting to see if errors occur for. """ self.prune_sent() if not self.send_queue.empty() or len(self.sent) > 0: return True return False
python
def messages_in_flight(self): """ Returns True if there are messages waiting to be sent or that we're still waiting to see if errors occur for. """ self.prune_sent() if not self.send_queue.empty() or len(self.sent) > 0: return True return False
[ "def", "messages_in_flight", "(", "self", ")", ":", "self", ".", "prune_sent", "(", ")", "if", "not", "self", ".", "send_queue", ".", "empty", "(", ")", "or", "len", "(", "self", ".", "sent", ")", ">", "0", ":", "return", "True", "return", "False" ]
Returns True if there are messages waiting to be sent or that we're still waiting to see if errors occur for.
[ "Returns", "True", "if", "there", "are", "messages", "waiting", "to", "be", "sent", "or", "that", "we", "re", "still", "waiting", "to", "see", "if", "errors", "occur", "for", "." ]
train
https://github.com/matrix-org/pushbaby/blob/d3265e32dba12cb25474cb9383481def4a8b3bbe/pushbaby/pushconnection.py#L221-L229
matrix-org/pushbaby
pushbaby/pushconnection.py
PushConnection._reallysend
def _reallysend(self, payload, token, expiration=None, priority=None, identifier=None): """ Args: payload (dict): The payload dictionary of the push to send descriptor (any): Opaque variable that is passed back to the pushbaby on failure """ if not self.alive: raise ConnectionDeadException() if not self.useable: raise ConnectionDeadException() seq = self._nextSeq() if seq >= PushConnection.MAX_PUSHES_PER_CONNECTION: # IDs are 4 byte so rather than worry about wrapping IDs, just make a new connection # Note we don't close the connection because we want to wait to see if any errors arrive self._retire_connection() payload_str = json_for_payload(truncate(payload)) items = '' items += self._apns_item(PushConnection.ITEM_DEVICE_TOKEN, token) items += self._apns_item(PushConnection.ITEM_PAYLOAD, payload_str) items += self._apns_item(PushConnection.ITEM_IDENTIFIER, seq) if expiration: items += self._apns_item(PushConnection.ITEM_EXPIRATION, expiration) if priority: items += self._apns_item(PushConnection.ITEM_PRIORITY, priority) apnsFrame = struct.pack("!BI", PushConnection.COMMAND_SENDPUSH, len(items)) + items try: written = 0 while written < len(apnsFrame): written += self.sock.send(apnsFrame[written:]) except: logger.exception("Caught exception sending push") raise self.sent[seq] = PushConnection.SentMessage( time.time(), token, payload, expiration, priority, identifier ) self.last_push_sent = time.time()
python
def _reallysend(self, payload, token, expiration=None, priority=None, identifier=None): """ Args: payload (dict): The payload dictionary of the push to send descriptor (any): Opaque variable that is passed back to the pushbaby on failure """ if not self.alive: raise ConnectionDeadException() if not self.useable: raise ConnectionDeadException() seq = self._nextSeq() if seq >= PushConnection.MAX_PUSHES_PER_CONNECTION: # IDs are 4 byte so rather than worry about wrapping IDs, just make a new connection # Note we don't close the connection because we want to wait to see if any errors arrive self._retire_connection() payload_str = json_for_payload(truncate(payload)) items = '' items += self._apns_item(PushConnection.ITEM_DEVICE_TOKEN, token) items += self._apns_item(PushConnection.ITEM_PAYLOAD, payload_str) items += self._apns_item(PushConnection.ITEM_IDENTIFIER, seq) if expiration: items += self._apns_item(PushConnection.ITEM_EXPIRATION, expiration) if priority: items += self._apns_item(PushConnection.ITEM_PRIORITY, priority) apnsFrame = struct.pack("!BI", PushConnection.COMMAND_SENDPUSH, len(items)) + items try: written = 0 while written < len(apnsFrame): written += self.sock.send(apnsFrame[written:]) except: logger.exception("Caught exception sending push") raise self.sent[seq] = PushConnection.SentMessage( time.time(), token, payload, expiration, priority, identifier ) self.last_push_sent = time.time()
[ "def", "_reallysend", "(", "self", ",", "payload", ",", "token", ",", "expiration", "=", "None", ",", "priority", "=", "None", ",", "identifier", "=", "None", ")", ":", "if", "not", "self", ".", "alive", ":", "raise", "ConnectionDeadException", "(", ")",...
Args: payload (dict): The payload dictionary of the push to send descriptor (any): Opaque variable that is passed back to the pushbaby on failure
[ "Args", ":", "payload", "(", "dict", ")", ":", "The", "payload", "dictionary", "of", "the", "push", "to", "send", "descriptor", "(", "any", ")", ":", "Opaque", "variable", "that", "is", "passed", "back", "to", "the", "pushbaby", "on", "failure" ]
train
https://github.com/matrix-org/pushbaby/blob/d3265e32dba12cb25474cb9383481def4a8b3bbe/pushbaby/pushconnection.py#L277-L316
dailymuse/oz
oz/sqlalchemy/middleware.py
SQLAlchemyMiddleware.db
def db(self, connection_string=None): """Gets the SQLALchemy session for this request""" connection_string = connection_string or self.settings["db"] if not hasattr(self, "_db_conns"): self._db_conns = {} if not connection_string in self._db_conns: self._db_conns[connection_string] = oz.sqlalchemy.session(connection_string=connection_string) return self._db_conns[connection_string]
python
def db(self, connection_string=None): """Gets the SQLALchemy session for this request""" connection_string = connection_string or self.settings["db"] if not hasattr(self, "_db_conns"): self._db_conns = {} if not connection_string in self._db_conns: self._db_conns[connection_string] = oz.sqlalchemy.session(connection_string=connection_string) return self._db_conns[connection_string]
[ "def", "db", "(", "self", ",", "connection_string", "=", "None", ")", ":", "connection_string", "=", "connection_string", "or", "self", ".", "settings", "[", "\"db\"", "]", "if", "not", "hasattr", "(", "self", ",", "\"_db_conns\"", ")", ":", "self", ".", ...
Gets the SQLALchemy session for this request
[ "Gets", "the", "SQLALchemy", "session", "for", "this", "request" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/middleware.py#L20-L30
dailymuse/oz
oz/sqlalchemy/middleware.py
SQLAlchemyMiddleware._sqlalchemy_on_finish
def _sqlalchemy_on_finish(self): """ Closes the sqlalchemy transaction. Rolls back if an error occurred. """ if hasattr(self, "_db_conns"): try: if self.get_status() >= 200 and self.get_status() <= 399: for db_conn in self._db_conns.values(): db_conn.commit() else: for db_conn in self._db_conns.values(): db_conn.rollback() except: tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0])) raise finally: for db_conn in self._db_conns.values(): try: db_conn.close() except: tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
python
def _sqlalchemy_on_finish(self): """ Closes the sqlalchemy transaction. Rolls back if an error occurred. """ if hasattr(self, "_db_conns"): try: if self.get_status() >= 200 and self.get_status() <= 399: for db_conn in self._db_conns.values(): db_conn.commit() else: for db_conn in self._db_conns.values(): db_conn.rollback() except: tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0])) raise finally: for db_conn in self._db_conns.values(): try: db_conn.close() except: tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
[ "def", "_sqlalchemy_on_finish", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"_db_conns\"", ")", ":", "try", ":", "if", "self", ".", "get_status", "(", ")", ">=", "200", "and", "self", ".", "get_status", "(", ")", "<=", "399", ":", "fo...
Closes the sqlalchemy transaction. Rolls back if an error occurred.
[ "Closes", "the", "sqlalchemy", "transaction", ".", "Rolls", "back", "if", "an", "error", "occurred", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/middleware.py#L32-L53
dailymuse/oz
oz/sqlalchemy/middleware.py
SQLAlchemyMiddleware._sqlalchemy_on_connection_close
def _sqlalchemy_on_connection_close(self): """ Rollsback and closes the active session, since the client disconnected before the request could be completed. """ if hasattr(self, "_db_conns"): try: for db_conn in self._db_conns.values(): db_conn.rollback() except: tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0])) raise finally: for db_conn in self._db_conns.values(): try: db_conn.close() except: tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
python
def _sqlalchemy_on_connection_close(self): """ Rollsback and closes the active session, since the client disconnected before the request could be completed. """ if hasattr(self, "_db_conns"): try: for db_conn in self._db_conns.values(): db_conn.rollback() except: tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0])) raise finally: for db_conn in self._db_conns.values(): try: db_conn.close() except: tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
[ "def", "_sqlalchemy_on_connection_close", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"_db_conns\"", ")", ":", "try", ":", "for", "db_conn", "in", "self", ".", "_db_conns", ".", "values", "(", ")", ":", "db_conn", ".", "rollback", "(", ")...
Rollsback and closes the active session, since the client disconnected before the request could be completed.
[ "Rollsback", "and", "closes", "the", "active", "session", "since", "the", "client", "disconnected", "before", "the", "request", "could", "be", "completed", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/middleware.py#L55-L73
michaelpb/omnic
omnic/config/utils.py
use_settings
def use_settings(**kwargs): ''' Context manager to temporarily override settings ''' from omnic import singletons singletons.settings.use_settings_dict(kwargs) yield singletons.settings.use_previous_settings()
python
def use_settings(**kwargs): ''' Context manager to temporarily override settings ''' from omnic import singletons singletons.settings.use_settings_dict(kwargs) yield singletons.settings.use_previous_settings()
[ "def", "use_settings", "(", "*", "*", "kwargs", ")", ":", "from", "omnic", "import", "singletons", "singletons", ".", "settings", ".", "use_settings_dict", "(", "kwargs", ")", "yield", "singletons", ".", "settings", ".", "use_previous_settings", "(", ")" ]
Context manager to temporarily override settings
[ "Context", "manager", "to", "temporarily", "override", "settings" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/config/utils.py#L5-L12
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.add_key
def add_key(self): "Add ssh key to gitlab if necessary" try: with open(self.args.ssh_public_key) as f: public_key = f.read().strip() except: log.debug("No key found in {}".format(self.args.ssh_public_key)) return None g = self.gitlab url = g['url'] + "/user/keys" query = {'private_token': g['token']} keys = requests.get(url, params=query).json() log.debug("looking for '" + public_key + "' in " + str(keys)) if (list(filter(lambda key: key['key'] == public_key, keys))): log.debug(self.args.ssh_public_key + " already exists") return None else: name = 'github2gitlab' log.info("add " + name + " ssh public key from " + self.args.ssh_public_key) query['title'] = name query['key'] = public_key result = requests.post(url, query) if result.status_code != requests.codes.created: log.warn('Key {} already in GitLab. ' 'Possible under a different user. Skipping...' .format(self.args.ssh_public_key)) return public_key
python
def add_key(self): "Add ssh key to gitlab if necessary" try: with open(self.args.ssh_public_key) as f: public_key = f.read().strip() except: log.debug("No key found in {}".format(self.args.ssh_public_key)) return None g = self.gitlab url = g['url'] + "/user/keys" query = {'private_token': g['token']} keys = requests.get(url, params=query).json() log.debug("looking for '" + public_key + "' in " + str(keys)) if (list(filter(lambda key: key['key'] == public_key, keys))): log.debug(self.args.ssh_public_key + " already exists") return None else: name = 'github2gitlab' log.info("add " + name + " ssh public key from " + self.args.ssh_public_key) query['title'] = name query['key'] = public_key result = requests.post(url, query) if result.status_code != requests.codes.created: log.warn('Key {} already in GitLab. ' 'Possible under a different user. Skipping...' .format(self.args.ssh_public_key)) return public_key
[ "def", "add_key", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "args", ".", "ssh_public_key", ")", "as", "f", ":", "public_key", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "except", ":", "log", ".", "debug", ...
Add ssh key to gitlab if necessary
[ "Add", "ssh", "key", "to", "gitlab", "if", "necessary" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L254-L281
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.add_project
def add_project(self): "Create project in gitlab if it does not exist" g = self.gitlab url = g['url'] + "/projects/" + g['repo'] query = {'private_token': g['token']} if (requests.get(url, params=query).status_code == requests.codes.ok): log.debug("project " + url + " already exists") return None else: log.info("add project " + g['repo']) url = g['url'] + "/projects" query['public'] = 'true' query['namespace'] = g['namespace'] query['name'] = g['name'] result = requests.post(url, params=query) if result.status_code != requests.codes.created: raise ValueError(result.text) log.debug("project " + g['repo'] + " added: " + result.text) return result.json()
python
def add_project(self): "Create project in gitlab if it does not exist" g = self.gitlab url = g['url'] + "/projects/" + g['repo'] query = {'private_token': g['token']} if (requests.get(url, params=query).status_code == requests.codes.ok): log.debug("project " + url + " already exists") return None else: log.info("add project " + g['repo']) url = g['url'] + "/projects" query['public'] = 'true' query['namespace'] = g['namespace'] query['name'] = g['name'] result = requests.post(url, params=query) if result.status_code != requests.codes.created: raise ValueError(result.text) log.debug("project " + g['repo'] + " added: " + result.text) return result.json()
[ "def", "add_project", "(", "self", ")", ":", "g", "=", "self", ".", "gitlab", "url", "=", "g", "[", "'url'", "]", "+", "\"/projects/\"", "+", "g", "[", "'repo'", "]", "query", "=", "{", "'private_token'", ":", "g", "[", "'token'", "]", "}", "if", ...
Create project in gitlab if it does not exist
[ "Create", "project", "in", "gitlab", "if", "it", "does", "not", "exist" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L283-L302
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.unprotect_branches
def unprotect_branches(self): "Unprotect branches of the GitLab project" g = self.gitlab url = g['url'] + "/projects/" + g['repo'] + "/repository/branches" query = {'private_token': g['token']} unprotected = 0 r = requests.get(url, params=query) r.raise_for_status() for branch in r.json(): if branch['protected']: r = requests.put(url + "/" + branch['name'] + "/unprotect", params=query) r.raise_for_status() unprotected += 1 return unprotected
python
def unprotect_branches(self): "Unprotect branches of the GitLab project" g = self.gitlab url = g['url'] + "/projects/" + g['repo'] + "/repository/branches" query = {'private_token': g['token']} unprotected = 0 r = requests.get(url, params=query) r.raise_for_status() for branch in r.json(): if branch['protected']: r = requests.put(url + "/" + branch['name'] + "/unprotect", params=query) r.raise_for_status() unprotected += 1 return unprotected
[ "def", "unprotect_branches", "(", "self", ")", ":", "g", "=", "self", ".", "gitlab", "url", "=", "g", "[", "'url'", "]", "+", "\"/projects/\"", "+", "g", "[", "'repo'", "]", "+", "\"/repository/branches\"", "query", "=", "{", "'private_token'", ":", "g",...
Unprotect branches of the GitLab project
[ "Unprotect", "branches", "of", "the", "GitLab", "project" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L304-L318
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.json_loads
def json_loads(payload): "Log the payload that cannot be parsed" try: return json.loads(payload) except ValueError as e: log.error("unable to json.loads(" + payload + ")") raise e
python
def json_loads(payload): "Log the payload that cannot be parsed" try: return json.loads(payload) except ValueError as e: log.error("unable to json.loads(" + payload + ")") raise e
[ "def", "json_loads", "(", "payload", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "payload", ")", "except", "ValueError", "as", "e", ":", "log", ".", "error", "(", "\"unable to json.loads(\"", "+", "payload", "+", "\")\"", ")", "raise", "e"...
Log the payload that cannot be parsed
[ "Log", "the", "payload", "that", "cannot", "be", "parsed" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L434-L440
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.get_pull_requests
def get_pull_requests(self): "https://developer.github.com/v3/pulls/#list-pull-requests" g = self.github query = {'state': 'all'} if self.args.github_token: query['access_token'] = g['token'] def f(pull): if self.args.ignore_closed: return (pull['state'] == 'opened' or (pull['state'] == 'closed' and pull['merged_at'])) else: return True pulls = filter(f, self.get(g['url'] + "/repos/" + g['repo'] + "/pulls", query, self.args.cache)) return dict([(str(pull['number']), pull) for pull in pulls])
python
def get_pull_requests(self): "https://developer.github.com/v3/pulls/#list-pull-requests" g = self.github query = {'state': 'all'} if self.args.github_token: query['access_token'] = g['token'] def f(pull): if self.args.ignore_closed: return (pull['state'] == 'opened' or (pull['state'] == 'closed' and pull['merged_at'])) else: return True pulls = filter(f, self.get(g['url'] + "/repos/" + g['repo'] + "/pulls", query, self.args.cache)) return dict([(str(pull['number']), pull) for pull in pulls])
[ "def", "get_pull_requests", "(", "self", ")", ":", "g", "=", "self", ".", "github", "query", "=", "{", "'state'", ":", "'all'", "}", "if", "self", ".", "args", ".", "github_token", ":", "query", "[", "'access_token'", "]", "=", "g", "[", "'token'", "...
https://developer.github.com/v3/pulls/#list-pull-requests
[ "https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "pulls", "/", "#list", "-", "pull", "-", "requests" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L474-L490
ubc/github2gitlab
github2gitlab/main.py
GitHub2GitLab.get_merge_requests
def get_merge_requests(self): "http://doc.gitlab.com/ce/api/merge_requests.html" g = self.gitlab merges = self.get(g['url'] + "/projects/" + g['repo'] + "/merge_requests", {'private_token': g['token'], 'state': 'all'}, cache=False) return dict([(str(merge['id']), merge) for merge in merges])
python
def get_merge_requests(self): "http://doc.gitlab.com/ce/api/merge_requests.html" g = self.gitlab merges = self.get(g['url'] + "/projects/" + g['repo'] + "/merge_requests", {'private_token': g['token'], 'state': 'all'}, cache=False) return dict([(str(merge['id']), merge) for merge in merges])
[ "def", "get_merge_requests", "(", "self", ")", ":", "g", "=", "self", ".", "gitlab", "merges", "=", "self", ".", "get", "(", "g", "[", "'url'", "]", "+", "\"/projects/\"", "+", "g", "[", "'repo'", "]", "+", "\"/merge_requests\"", ",", "{", "'private_to...
http://doc.gitlab.com/ce/api/merge_requests.html
[ "http", ":", "//", "doc", ".", "gitlab", ".", "com", "/", "ce", "/", "api", "/", "merge_requests", ".", "html" ]
train
https://github.com/ubc/github2gitlab/blob/795898f6d438621fa0c996a7156d70c382ff0493/github2gitlab/main.py#L492-L499
castelao/oceansdb
oceansdb/common.py
cropIndices
def cropIndices(dims, lat, lon, depth=None, doy=None): """ Return the indices to crop dataset Assuming that the dataset have the dimensions given by dims, this function return the indices to conform with the given coordinates (lat, lon, ...) """ dims_out = {} idx = {} yn = slice( np.nonzero(dims['lat'] <= lat.min())[0].max(), np.nonzero(dims['lat'] >= lat.max())[0].min() + 1) dims_out['lat'] = np.atleast_1d(dims['lat'][yn]) idx['yn'] = yn lon_ext = np.array( (dims['lon'] - 2*360).tolist() + (dims['lon'] - 360).tolist() + dims['lon'].tolist() + (dims['lon'] + 360).tolist()) xn_ext = list(4 * list(range(dims['lon'].shape[0]))) xn_start = np.nonzero(lon_ext <= lon.min())[0].max() xn_end = np.nonzero(lon_ext >= lon.max())[0].min() xn = xn_ext[xn_start:xn_end+1] dims_out['lon'] = np.atleast_1d(lon_ext[xn_start:xn_end+1]) idx['xn'] = xn if depth is not None: zn = slice( np.nonzero(dims['depth'] <= depth.min())[0].max(), np.nonzero(dims['depth'] >= min(dims['depth'].max(), depth.max()) )[0].min() + 1 ) # If a higher degree interpolation system uses more than one data # point in the edge, I should extend this selection one point on # each side, without go beyond 0 # if zn.start < 0: # zn = slice(0, zn.stop, zn.step) dims_out['depth'] = np.atleast_1d(dims['depth'][zn]) idx['zn'] = zn if doy is not None: # Source has only one time, like total mean field, or annual mean. if dims['time'].shape == (1,): dims_out['time'] = dims['time'] idx['tn'] = [0] else: time_ext = np.array( [dims['time'][-1] - 365.25] + dims['time'].tolist() + [dims['time'][0] + 365.25]) tn_ext = list(range(dims['time'].size)) tn_ext = [tn_ext[-1]] + tn_ext + [tn_ext[0]] tn_start = np.nonzero(time_ext <= doy.min())[0].max() tn_end = np.nonzero(time_ext >= doy.max())[0].min() dims_out['time'] = np.atleast_1d(time_ext[tn_start:tn_end+1]) idx['tn'] = tn_ext[tn_start:tn_end+1] return dims_out, idx
python
def cropIndices(dims, lat, lon, depth=None, doy=None): """ Return the indices to crop dataset Assuming that the dataset have the dimensions given by dims, this function return the indices to conform with the given coordinates (lat, lon, ...) """ dims_out = {} idx = {} yn = slice( np.nonzero(dims['lat'] <= lat.min())[0].max(), np.nonzero(dims['lat'] >= lat.max())[0].min() + 1) dims_out['lat'] = np.atleast_1d(dims['lat'][yn]) idx['yn'] = yn lon_ext = np.array( (dims['lon'] - 2*360).tolist() + (dims['lon'] - 360).tolist() + dims['lon'].tolist() + (dims['lon'] + 360).tolist()) xn_ext = list(4 * list(range(dims['lon'].shape[0]))) xn_start = np.nonzero(lon_ext <= lon.min())[0].max() xn_end = np.nonzero(lon_ext >= lon.max())[0].min() xn = xn_ext[xn_start:xn_end+1] dims_out['lon'] = np.atleast_1d(lon_ext[xn_start:xn_end+1]) idx['xn'] = xn if depth is not None: zn = slice( np.nonzero(dims['depth'] <= depth.min())[0].max(), np.nonzero(dims['depth'] >= min(dims['depth'].max(), depth.max()) )[0].min() + 1 ) # If a higher degree interpolation system uses more than one data # point in the edge, I should extend this selection one point on # each side, without go beyond 0 # if zn.start < 0: # zn = slice(0, zn.stop, zn.step) dims_out['depth'] = np.atleast_1d(dims['depth'][zn]) idx['zn'] = zn if doy is not None: # Source has only one time, like total mean field, or annual mean. if dims['time'].shape == (1,): dims_out['time'] = dims['time'] idx['tn'] = [0] else: time_ext = np.array( [dims['time'][-1] - 365.25] + dims['time'].tolist() + [dims['time'][0] + 365.25]) tn_ext = list(range(dims['time'].size)) tn_ext = [tn_ext[-1]] + tn_ext + [tn_ext[0]] tn_start = np.nonzero(time_ext <= doy.min())[0].max() tn_end = np.nonzero(time_ext >= doy.max())[0].min() dims_out['time'] = np.atleast_1d(time_ext[tn_start:tn_end+1]) idx['tn'] = tn_ext[tn_start:tn_end+1] return dims_out, idx
[ "def", "cropIndices", "(", "dims", ",", "lat", ",", "lon", ",", "depth", "=", "None", ",", "doy", "=", "None", ")", ":", "dims_out", "=", "{", "}", "idx", "=", "{", "}", "yn", "=", "slice", "(", "np", ".", "nonzero", "(", "dims", "[", "'lat'", ...
Return the indices to crop dataset Assuming that the dataset have the dimensions given by dims, this function return the indices to conform with the given coordinates (lat, lon, ...)
[ "Return", "the", "indices", "to", "crop", "dataset" ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/common.py#L7-L66
biocore/burrito-fillings
bfillings/seqprep.py
join_paired_end_reads_seqprep
def join_paired_end_reads_seqprep( reads1_infile_path, reads2_infile_path, outfile_label='seqprep', max_overlap_ascii_q_score='J', min_overlap=None, # typical default vs 15 max_mismatch_good_frac=None, # typical default is 0.02, min_frac_matching=None, # typical default is 0.9, phred_64=False, params={}, working_dir=tempfile.gettempdir(), SuppressStderr=True, SuppressStdout=True, HALT_EXEC=False): """ Runs SeqPrep parameters to assemble paired-end reads. -reads1_infile_path : reads1.fastq infile path -reads2_infile_path : reads2.fastq infile path -max_overlap_ascii_q_score : 'J' for Illumina 1.8+ phred+33, representing a score of 41. See: http://en.wikipedia.org/wiki/FASTQ_format -min_overlap : minimum overall base pair overlap to merge two reads -max_mismatch_good_frac : maximum fraction of good quality mismatching bases to overlap reads -min_frac_matching : minimum fraction of matching bases to overlap reads -phred_64 : if input is in phred+64. Output will always be phred+33. -params : other optional SeqPrep parameters NOTE: SeqPrep always outputs gzipped files """ abs_r1_path = os.path.abspath(reads1_infile_path) abs_r2_path = os.path.abspath(reads2_infile_path) infile_paths = [abs_r1_path, abs_r2_path] # check / make absolute infile paths for p in infile_paths: if not os.path.exists(p): raise IOError('Infile not found at: %s' % p) # set up controller seqprep_app = SeqPrep(params=params, WorkingDir=working_dir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, HALT_EXEC=HALT_EXEC) # required by SeqPrep to assemble: seqprep_app.Parameters['-f'].on(abs_r1_path) seqprep_app.Parameters['-r'].on(abs_r2_path) if outfile_label is not None: seqprep_app.Parameters['-s'].on(outfile_label + '_assembled.fastq.gz') seqprep_app.Parameters[ '-1'].on(outfile_label + '_unassembled_R1.fastq.gz') seqprep_app.Parameters[ '-2'].on(outfile_label + '_unassembled_R2.fastq.gz') else: raise ValueError("Must set an outfile_label in order to set", " the -s, -1, & -2 options!") 
if min_overlap is not None: if isinstance(min_overlap, int) and min_overlap > 0: seqprep_app.Parameters['-o'].on(min_overlap) else: raise ValueError("min_overlap must be an int >= 0!") if max_mismatch_good_frac is not None: if isinstance(max_mismatch_good_frac, float) and 0.0 < max_mismatch_good_frac <= 1.0: seqprep_app.Parameters['-m'].on(max_mismatch_good_frac) else: raise ValueError( "max_mismatch_good_frac must be a float between 0.0-1.0!") if min_frac_matching is not None: if isinstance(min_frac_matching, float) and 0.0 < min_frac_matching <= 1.0: seqprep_app.Parameters['-n'].on(min_frac_matching) else: raise ValueError( "min_frac_matching must be a float between 0.0-1.0!") if max_overlap_ascii_q_score is not None: if isinstance(max_overlap_ascii_q_score, str) \ and len(max_overlap_ascii_q_score) == 1: seqprep_app.Parameters['-y'].on(max_overlap_ascii_q_score) else: raise ValueError("max_overlap_ascii_q_score must be a single", " ASCII character string. e.g. \'J\'!") # if input is phred+64 if phred_64 is True: seqprep_app.Parameters['-6'].on() # run assembler result = seqprep_app() # Store output file path data to dict path_dict = {} path_dict['Assembled'] = result['Assembled'].name path_dict['UnassembledReads1'] = result['UnassembledReads1'].name path_dict['UnassembledReads2'] = result['UnassembledReads2'].name # sanity check that files actually exist in path lcoations for path in path_dict.values(): if not os.path.exists(path): raise IOError('Output file not found at: %s' % path) return path_dict
python
def join_paired_end_reads_seqprep( reads1_infile_path, reads2_infile_path, outfile_label='seqprep', max_overlap_ascii_q_score='J', min_overlap=None, # typical default vs 15 max_mismatch_good_frac=None, # typical default is 0.02, min_frac_matching=None, # typical default is 0.9, phred_64=False, params={}, working_dir=tempfile.gettempdir(), SuppressStderr=True, SuppressStdout=True, HALT_EXEC=False): """ Runs SeqPrep parameters to assemble paired-end reads. -reads1_infile_path : reads1.fastq infile path -reads2_infile_path : reads2.fastq infile path -max_overlap_ascii_q_score : 'J' for Illumina 1.8+ phred+33, representing a score of 41. See: http://en.wikipedia.org/wiki/FASTQ_format -min_overlap : minimum overall base pair overlap to merge two reads -max_mismatch_good_frac : maximum fraction of good quality mismatching bases to overlap reads -min_frac_matching : minimum fraction of matching bases to overlap reads -phred_64 : if input is in phred+64. Output will always be phred+33. -params : other optional SeqPrep parameters NOTE: SeqPrep always outputs gzipped files """ abs_r1_path = os.path.abspath(reads1_infile_path) abs_r2_path = os.path.abspath(reads2_infile_path) infile_paths = [abs_r1_path, abs_r2_path] # check / make absolute infile paths for p in infile_paths: if not os.path.exists(p): raise IOError('Infile not found at: %s' % p) # set up controller seqprep_app = SeqPrep(params=params, WorkingDir=working_dir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, HALT_EXEC=HALT_EXEC) # required by SeqPrep to assemble: seqprep_app.Parameters['-f'].on(abs_r1_path) seqprep_app.Parameters['-r'].on(abs_r2_path) if outfile_label is not None: seqprep_app.Parameters['-s'].on(outfile_label + '_assembled.fastq.gz') seqprep_app.Parameters[ '-1'].on(outfile_label + '_unassembled_R1.fastq.gz') seqprep_app.Parameters[ '-2'].on(outfile_label + '_unassembled_R2.fastq.gz') else: raise ValueError("Must set an outfile_label in order to set", " the -s, -1, & -2 options!") 
if min_overlap is not None: if isinstance(min_overlap, int) and min_overlap > 0: seqprep_app.Parameters['-o'].on(min_overlap) else: raise ValueError("min_overlap must be an int >= 0!") if max_mismatch_good_frac is not None: if isinstance(max_mismatch_good_frac, float) and 0.0 < max_mismatch_good_frac <= 1.0: seqprep_app.Parameters['-m'].on(max_mismatch_good_frac) else: raise ValueError( "max_mismatch_good_frac must be a float between 0.0-1.0!") if min_frac_matching is not None: if isinstance(min_frac_matching, float) and 0.0 < min_frac_matching <= 1.0: seqprep_app.Parameters['-n'].on(min_frac_matching) else: raise ValueError( "min_frac_matching must be a float between 0.0-1.0!") if max_overlap_ascii_q_score is not None: if isinstance(max_overlap_ascii_q_score, str) \ and len(max_overlap_ascii_q_score) == 1: seqprep_app.Parameters['-y'].on(max_overlap_ascii_q_score) else: raise ValueError("max_overlap_ascii_q_score must be a single", " ASCII character string. e.g. \'J\'!") # if input is phred+64 if phred_64 is True: seqprep_app.Parameters['-6'].on() # run assembler result = seqprep_app() # Store output file path data to dict path_dict = {} path_dict['Assembled'] = result['Assembled'].name path_dict['UnassembledReads1'] = result['UnassembledReads1'].name path_dict['UnassembledReads2'] = result['UnassembledReads2'].name # sanity check that files actually exist in path lcoations for path in path_dict.values(): if not os.path.exists(path): raise IOError('Output file not found at: %s' % path) return path_dict
[ "def", "join_paired_end_reads_seqprep", "(", "reads1_infile_path", ",", "reads2_infile_path", ",", "outfile_label", "=", "'seqprep'", ",", "max_overlap_ascii_q_score", "=", "'J'", ",", "min_overlap", "=", "None", ",", "# typical default vs 15", "max_mismatch_good_frac", "="...
Runs SeqPrep parameters to assemble paired-end reads. -reads1_infile_path : reads1.fastq infile path -reads2_infile_path : reads2.fastq infile path -max_overlap_ascii_q_score : 'J' for Illumina 1.8+ phred+33, representing a score of 41. See: http://en.wikipedia.org/wiki/FASTQ_format -min_overlap : minimum overall base pair overlap to merge two reads -max_mismatch_good_frac : maximum fraction of good quality mismatching bases to overlap reads -min_frac_matching : minimum fraction of matching bases to overlap reads -phred_64 : if input is in phred+64. Output will always be phred+33. -params : other optional SeqPrep parameters NOTE: SeqPrep always outputs gzipped files
[ "Runs", "SeqPrep", "parameters", "to", "assemble", "paired", "-", "end", "reads", ".", "-", "reads1_infile_path", ":", "reads1", ".", "fastq", "infile", "path", "-", "reads2_infile_path", ":", "reads2", ".", "fastq", "infile", "path", "-", "max_overlap_ascii_q_s...
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L243-L351
biocore/burrito-fillings
bfillings/seqprep.py
SeqPrep._unassembled_reads1_out_file_name
def _unassembled_reads1_out_file_name(self): """Checks file name is set for reads1 output. Returns absolute path.""" if self.Parameters['-1'].isOn(): unassembled_reads1 = self._absolute( str(self.Parameters['-1'].Value)) else: raise ValueError("No reads1 (flag: -1) output path specified") return unassembled_reads1
python
def _unassembled_reads1_out_file_name(self): """Checks file name is set for reads1 output. Returns absolute path.""" if self.Parameters['-1'].isOn(): unassembled_reads1 = self._absolute( str(self.Parameters['-1'].Value)) else: raise ValueError("No reads1 (flag: -1) output path specified") return unassembled_reads1
[ "def", "_unassembled_reads1_out_file_name", "(", "self", ")", ":", "if", "self", ".", "Parameters", "[", "'-1'", "]", ".", "isOn", "(", ")", ":", "unassembled_reads1", "=", "self", ".", "_absolute", "(", "str", "(", "self", ".", "Parameters", "[", "'-1'", ...
Checks file name is set for reads1 output. Returns absolute path.
[ "Checks", "file", "name", "is", "set", "for", "reads1", "output", ".", "Returns", "absolute", "path", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L125-L133