Dataset schema (column: type, value-length statistics):

repo              string   (7 to 55 chars)
path              string   (4 to 223 chars)
func_name         string   (1 to 134 chars)
original_string   string   (75 to 104k chars)
language          string   (1 class)
code              string   (75 to 104k chars)
code_tokens       list     (19 to 28.4k items)
docstring         string   (1 to 46.9k chars)
docstring_tokens  list     (1 to 1.97k items)
sha               string   (40 chars)
url               string   (87 to 315 chars)
partition         string   (1 class)
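The records below follow this schema, one function per record. As a hedged illustration only (the physical layout of this dump is an assumption; CodeSearchNet-style corpora typically ship as gzipped JSON-lines, and the shard name below is hypothetical), such records could be read like this:

    import gzip
    import json

    # Hypothetical shard name; the actual file naming is not shown in this dump.
    with gzip.open("python_train_0.jsonl.gz", "rt", encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            # Each record carries the columns listed in the schema above.
            print(record["repo"], record["func_name"], record["url"])
            break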
zetaops/zengine
zengine/client_queue.py
ClientQueue.send_to_default_exchange
python
def send_to_default_exchange(self, sess_id, message=None):
    """
    Send messages through RabbitMQ's default exchange, which will be
    delivered through routing_key (sess_id). This method is only used for
    un-authenticated users, i.e. the login process.

    Args:
        sess_id string: Session id
        message dict: Message object.
    """
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (
        sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
[ "def", "send_to_default_exchange", "(", "self", ",", "sess_id", ",", "message", "=", "None", ")", ":", "msg", "=", "json", ".", "dumps", "(", "message", ",", "cls", "=", "ZEngineJSONEncoder", ")", "log", ".", "debug", "(", "\"Sending following message to %s queue through default exchange:\\n%s\"", "%", "(", "sess_id", ",", "msg", ")", ")", "self", ".", "get_channel", "(", ")", ".", "publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "sess_id", ",", "body", "=", "msg", ")" ]
Send messages through RabbitMQ's default exchange, which will be delivered through routing_key (sess_id). This method is only used for un-authenticated users, i.e. the login process. Args: sess_id string: Session id message dict: Message object.
[ "Send", "messages", "through", "RabbitMQ", "s", "default", "exchange", "which", "will", "be", "delivered", "through", "routing_key", "(", "sess_id", ")", "." ]
b5bc32d3b37bca799f8985be916f04528ac79e4a
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/client_queue.py#L59-L73
train
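The function above relies on an AMQP property: the default exchange (the empty-string exchange) routes a message to the queue whose name equals the routing key, so publishing with routing_key=sess_id reaches the per-session queue directly. A minimal sketch of that behaviour with the pika client (pika, the host, and the session id are stand-ins here, not zengine's actual channel wrapper or naming):

    import json
    import pika  # assumed stand-in for zengine's channel wrapper

    sess_id = "session-42"  # hypothetical session id
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    channel = connection.channel()
    channel.queue_declare(queue=sess_id)  # the per-session queue
    # Default exchange: the routing key *is* the destination queue name.
    channel.basic_publish(exchange="", routing_key=sess_id,
                          body=json.dumps({"cmd": "login_ok"}))
    connection.close()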
zetaops/zengine
zengine/client_queue.py
ClientQueue.send_to_prv_exchange
python
def send_to_prv_exchange(self, user_id, message=None):
    """
    Send messages through the logged-in user's private exchange.

    Args:
        user_id string: User key
        message dict: Message object
    """
    exchange = 'prv_%s' % user_id.lower()
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending to following user's \"%s\" exchange:\n%s" % (exchange, msg))
    self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
[ "def", "send_to_prv_exchange", "(", "self", ",", "user_id", ",", "message", "=", "None", ")", ":", "exchange", "=", "'prv_%s'", "%", "user_id", ".", "lower", "(", ")", "msg", "=", "json", ".", "dumps", "(", "message", ",", "cls", "=", "ZEngineJSONEncoder", ")", "log", ".", "debug", "(", "\"Sending following users \\\"%s\\\" exchange:\\n%s \"", "%", "(", "exchange", ",", "msg", ")", ")", "self", ".", "get_channel", "(", ")", ".", "publish", "(", "exchange", "=", "exchange", ",", "routing_key", "=", "''", ",", "body", "=", "msg", ")" ]
Send messages through the logged-in user's private exchange. Args: user_id string: User key message dict: Message object
[ "Send", "messages", "through", "logged", "in", "users", "private", "exchange", "." ]
b5bc32d3b37bca799f8985be916f04528ac79e4a
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/client_queue.py#L75-L87
train
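The private-exchange variant above publishes to a per-user exchange with an empty routing key, which matches the fanout pattern: every queue bound to prv_<user_id> (for example, one per open client connection) receives a copy. A sketch with pika under the same stand-in assumptions as the previous note (the fanout type and all names are assumptions, not confirmed zengine configuration):

    import json
    import pika  # assumed stand-in for zengine's channel wrapper

    user_id = "alice"  # hypothetical user key
    exchange = "prv_%s" % user_id.lower()
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, exchange_type="fanout")
    # One anonymous, exclusive queue per client connection, bound to the user's exchange.
    result = channel.queue_declare(queue="", exclusive=True)
    channel.queue_bind(queue=result.method.queue, exchange=exchange)
    # Empty routing key: a fanout exchange delivers to every bound queue.
    channel.basic_publish(exchange=exchange, routing_key="",
                          body=json.dumps({"notify": "new_task"}))
    connection.close()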
cimm-kzn/CGRtools
CGRtools/algorithms/compose.py
Compose.compose
python
def compose(self, other):
    """
    compose 2 graphs to CGR

    :param other: Molecule or CGR Container
    :return: CGRContainer
    """
    if not isinstance(other, Compose):
        raise TypeError('CGRContainer or MoleculeContainer [sub]class expected')
    cgr = self._get_subclass('CGRContainer')
    common = self._node.keys() & other
    if not common:
        if not (isinstance(self, cgr) or isinstance(other, cgr)):
            return cgr() | self | other
        return self | other

    unique_reactant = self._node.keys() - common
    unique_product = other._node.keys() - common
    h = cgr()
    atoms = h._node

    bonds = []
    common_adj = {n: {} for n in common}
    common_bonds = []

    r_atoms = {}
    r_skin = defaultdict(list)
    if isinstance(self, cgr):
        for n in unique_reactant:
            h.add_atom(self._node[n], n)
            for m, bond in self._adj[n].items():
                if m not in atoms:
                    if m in common:  # bond to common atoms is broken bond
                        r_bond = bond._reactant
                        if r_bond is None:  # skip None>None
                            continue
                        r_skin[n].append(m)
                        bond = DynBond.__new__(DynBond)
                        bond.__init_copy__(r_bond, None)
                    bonds.append((n, m, bond))
        for n in common:
            r_atoms[n] = self._node[n]._reactant
            for m, bond in self._adj[n].items():
                if m not in r_atoms and m in common:
                    tmp = [bond._reactant, None]
                    common_adj[n][m] = common_adj[m][n] = tmp
                    common_bonds.append((n, m, tmp))
    else:
        for n in unique_reactant:
            atom = DynAtom.__new__(DynAtom)  # add unique atom into CGR
            atom.__init_copy__(self._node[n], self._node[n])
            h.add_atom(atom, n)
            for m, r_bond in self._adj[n].items():  # unique atom neighbors
                if m not in atoms:  # bond not analyzed yet
                    bond = DynBond.__new__(DynBond)
                    if m in common:  # bond to common atoms
                        r_skin[n].append(m)
                        bond.__init_copy__(r_bond, None)
                    else:  # bond static
                        bond.__init_copy__(r_bond, r_bond)
                    bonds.append((n, m, bond))
        for n in common:
            r_atoms[n] = self._node[n]
            for m, bond in self._adj[n].items():
                if m not in r_atoms and m in common:  # analyze only common atoms bonds
                    tmp = [bond, None]  # reactant state only
                    common_adj[n][m] = common_adj[m][n] = tmp
                    common_bonds.append((n, m, tmp))

    p_atoms = {}
    p_skin = defaultdict(list)
    if isinstance(other, cgr):
        for n in unique_product:
            h.add_atom(other._node[n], n)
            for m, bond in other._adj[n].items():
                if m not in atoms:
                    if m in common:  # bond to common atoms is new bond
                        p_bond = bond._product
                        if p_bond is None:  # skip None>None
                            continue
                        p_skin[n].append(m)
                        bond = DynBond.__new__(DynBond)
                        bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))
        for n in common:
            p_atoms[n] = other._node[n]._product
            n_bonds = common_adj[n]
            for m, bond in other._adj[n].items():
                if m in n_bonds:
                    n_bonds[m][1] = bond._product
                elif m not in p_atoms and m in common:  # new bond of reaction
                    p_bond = bond._product
                    if p_bond is None:  # skip None>None
                        continue
                    bond = DynBond.__new__(DynBond)
                    bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))
    else:
        for n in unique_product:
            atom = DynAtom.__new__(DynAtom)
            atom.__init_copy__(other._node[n], other._node[n])
            h.add_atom(atom, n)
            for m, p_bond in other._adj[n].items():
                if m not in atoms:
                    bond = DynBond.__new__(DynBond)
                    if m in common:
                        p_skin[n].append(m)
                        bond.__init_copy__(None, p_bond)
                    else:
                        bond.__init_copy__(p_bond, p_bond)
                    bonds.append((n, m, bond))
        for n in common:
            p_atoms[n] = other._node[n]
            n_bonds = common_adj[n]
            for m, p_bond in other._adj[n].items():
                if m in n_bonds:  # set product state of changed bond
                    n_bonds[m][1] = p_bond
                elif m not in p_atoms and m in common:  # new bond of reaction
                    bond = DynBond.__new__(DynBond)
                    bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))

    for n, r_atom in r_atoms.items():  # prepare common DynAtom's
        p_atom = p_atoms[n]
        if r_atom.element != p_atom.element or r_atom.isotope != p_atom.isotope:
            raise ValueError('atom-to-atom mapping invalid')
        atom = DynAtom.__new__(DynAtom)
        atom.__init_copy__(r_atom, p_atom)
        h.add_atom(atom, n)

    for n, m, (r_bond, p_bond) in common_bonds:
        if r_bond is p_bond is None:  # skip None>None
            continue
        bond = DynBond.__new__(DynBond)
        bond.__init_copy__(r_bond, p_bond)
        h.add_bond(n, m, bond)
    for n, m, bond in bonds:
        h.add_bond(n, m, bond)
    return h
[ "def", "compose", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Compose", ")", ":", "raise", "TypeError", "(", "'CGRContainer or MoleculeContainer [sub]class expected'", ")", "cgr", "=", "self", ".", "_get_subclass", "(", "'CGRContainer'", ")", "common", "=", "self", ".", "_node", ".", "keys", "(", ")", "&", "other", "if", "not", "common", ":", "if", "not", "(", "isinstance", "(", "self", ",", "cgr", ")", "or", "isinstance", "(", "other", ",", "cgr", ")", ")", ":", "return", "cgr", "(", ")", "|", "self", "|", "other", "return", "self", "|", "other", "unique_reactant", "=", "self", ".", "_node", ".", "keys", "(", ")", "-", "common", "unique_product", "=", "other", ".", "_node", ".", "keys", "(", ")", "-", "common", "h", "=", "cgr", "(", ")", "atoms", "=", "h", ".", "_node", "bonds", "=", "[", "]", "common_adj", "=", "{", "n", ":", "{", "}", "for", "n", "in", "common", "}", "common_bonds", "=", "[", "]", "r_atoms", "=", "{", "}", "r_skin", "=", "defaultdict", "(", "list", ")", "if", "isinstance", "(", "self", ",", "cgr", ")", ":", "for", "n", "in", "unique_reactant", ":", "h", ".", "add_atom", "(", "self", ".", "_node", "[", "n", "]", ",", "n", ")", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "if", "m", "in", "common", ":", "# bond to common atoms is broken bond", "r_bond", "=", "bond", ".", "_reactant", "if", "r_bond", "is", "None", ":", "# skip None>None", "continue", "r_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "None", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "r_atoms", "[", "n", "]", "=", "self", ".", "_node", "[", "n", "]", ".", "_reactant", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "r_atoms", "and", "m", "in", "common", ":", "tmp", "=", "[", "bond", ".", "_reactant", ",", "None", "]", "common_adj", "[", "n", "]", "[", "m", "]", "=", "common_adj", "[", "m", "]", "[", "n", "]", "=", "tmp", "common_bonds", ".", "append", "(", "(", "n", ",", "m", ",", "tmp", ")", ")", "else", ":", "for", "n", "in", "unique_reactant", ":", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "# add unique atom into CGR", "atom", ".", "__init_copy__", "(", "self", ".", "_node", "[", "n", "]", ",", "self", ".", "_node", "[", "n", "]", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "m", ",", "r_bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "# unique atom neighbors", "if", "m", "not", "in", "atoms", ":", "# bond not analyzed yet", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "if", "m", "in", "common", ":", "# bond to common atoms", "r_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "None", ")", "else", ":", "# bond static", "bond", ".", "__init_copy__", "(", "r_bond", ",", "r_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "r_atoms", "[", "n", "]", "=", "self", ".", "_node", "[", "n", "]", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "r_atoms", "and", "m", "in", "common", ":", "# analyze only common atoms bonds", "tmp", "=", "[", "bond", ",", "None", "]", 
"# reactant state only", "common_adj", "[", "n", "]", "[", "m", "]", "=", "common_adj", "[", "m", "]", "[", "n", "]", "=", "tmp", "common_bonds", ".", "append", "(", "(", "n", ",", "m", ",", "tmp", ")", ")", "p_atoms", "=", "{", "}", "p_skin", "=", "defaultdict", "(", "list", ")", "if", "isinstance", "(", "other", ",", "cgr", ")", ":", "for", "n", "in", "unique_product", ":", "h", ".", "add_atom", "(", "other", ".", "_node", "[", "n", "]", ",", "n", ")", "for", "m", ",", "bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "if", "m", "in", "common", ":", "# bond to common atoms is new bond", "p_bond", "=", "bond", ".", "_product", "if", "p_bond", "is", "None", ":", "# skip None>None", "continue", "p_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "p_atoms", "[", "n", "]", "=", "other", ".", "_node", "[", "n", "]", ".", "_product", "n_bonds", "=", "common_adj", "[", "n", "]", "for", "m", ",", "bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "in", "n_bonds", ":", "n_bonds", "[", "m", "]", "[", "1", "]", "=", "bond", ".", "_product", "elif", "m", "not", "in", "p_atoms", "and", "m", "in", "common", ":", "# new bond of reaction", "p_bond", "=", "bond", ".", "_product", "if", "p_bond", "is", "None", ":", "# skip None>None", "continue", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "else", ":", "for", "n", "in", "unique_product", ":", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "atom", ".", "__init_copy__", "(", "other", ".", "_node", "[", "n", "]", ",", "other", ".", "_node", "[", "n", "]", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "m", ",", "p_bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "if", "m", "in", "common", ":", "p_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "else", ":", "bond", ".", "__init_copy__", "(", "p_bond", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "p_atoms", "[", "n", "]", "=", "other", ".", "_node", "[", "n", "]", "n_bonds", "=", "common_adj", "[", "n", "]", "for", "m", ",", "p_bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "in", "n_bonds", ":", "# set product state of changed bond", "n_bonds", "[", "m", "]", "[", "1", "]", "=", "p_bond", "elif", "m", "not", "in", "p_atoms", "and", "m", "in", "common", ":", "# new bond of reaction", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", ",", "r_atom", "in", "r_atoms", ".", "items", "(", ")", ":", "# prepare common DynAtom's", "p_atom", "=", "p_atoms", "[", "n", "]", "if", "r_atom", ".", "element", "!=", "p_atom", ".", "element", "or", "r_atom", ".", "isotope", "!=", "p_atom", ".", "isotope", ":", "raise", "ValueError", "(", 
"'atom-to-atom mapping invalid'", ")", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "atom", ".", "__init_copy__", "(", "r_atom", ",", "p_atom", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "n", ",", "m", ",", "(", "r_bond", ",", "p_bond", ")", "in", "common_bonds", ":", "if", "r_bond", "is", "p_bond", "is", "None", ":", "# skip None>None", "continue", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "p_bond", ")", "h", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ")", "for", "n", ",", "m", ",", "bond", "in", "bonds", ":", "h", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ")", "return", "h" ]
compose 2 graphs to CGR :param other: Molecule or CGR Container :return: CGRContainer
[ "compose", "2", "graphs", "to", "CGR" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/compose.py#L30-L172
train
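To make the reactant/product pairing behind compose concrete: in a CGR, every bond carries a pair of orders, one for the reactant state and one for the product state; a bond present only on the reactant side becomes (order, None) (broken), and one present only on the product side becomes (None, order) (formed). A deliberately simplified toy over plain dicts (an illustration invented here, not the CGRtools data model):

    # Toy composition of two molecules sharing an atom numbering into a
    # CGR-like bond map. Bonds are {frozenset({n, m}): order}.
    def toy_compose(reactant_bonds, product_bonds):
        composed = {}
        for key in reactant_bonds.keys() | product_bonds.keys():
            composed[key] = (reactant_bonds.get(key), product_bonds.get(key))
        return composed

    # Hypothetical fragment: bond 1-2 goes single -> double, bond 2-3 breaks.
    reactant = {frozenset({1, 2}): 1, frozenset({2, 3}): 1}
    product = {frozenset({1, 2}): 2}
    print(toy_compose(reactant, product))
    # e.g. {frozenset({1, 2}): (1, 2), frozenset({2, 3}): (1, None)}
    # (set iteration order may vary)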
cimm-kzn/CGRtools
CGRtools/algorithms/compose.py
CGRCompose.decompose
python
def decompose(self):
    """
    decompose CGR to a pair of Molecules, which represent the reactant and
    product states of the reaction

    :return: tuple of two molecules
    """
    mc = self._get_subclass('MoleculeContainer')
    reactants = mc()
    products = mc()

    for n, atom in self.atoms():
        reactants.add_atom(atom._reactant, n)
        products.add_atom(atom._product, n)

    for n, m, bond in self.bonds():
        if bond._reactant is not None:
            reactants.add_bond(n, m, bond._reactant)
        if bond._product is not None:
            products.add_bond(n, m, bond._product)
    return reactants, products
[ "def", "decompose", "(", "self", ")", ":", "mc", "=", "self", ".", "_get_subclass", "(", "'MoleculeContainer'", ")", "reactants", "=", "mc", "(", ")", "products", "=", "mc", "(", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "reactants", ".", "add_atom", "(", "atom", ".", "_reactant", ",", "n", ")", "products", ".", "add_atom", "(", "atom", ".", "_product", ",", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "is", "not", "None", ":", "reactants", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ".", "_reactant", ")", "if", "bond", ".", "_product", "is", "not", "None", ":", "products", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ".", "_product", ")", "return", "reactants", ",", "products" ]
decompose CGR to a pair of Molecules, which represent the reactant and product states of the reaction :return: tuple of two molecules
[ "decompose", "CGR", "to", "pair", "of", "Molecules", "which", "represents", "reactants", "and", "products", "state", "of", "reaction" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/compose.py#L182-L201
train
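decompose is the inverse operation: each side of the stored (reactant, product) pair is copied into its own molecule, and None entries are skipped, just as the add_bond guards above do. Continuing the toy representation from the previous note (again an invented illustration, not the library's API):

    # Split (reactant_order, product_order) pairs back into two bond maps,
    # dropping the None side of broken/formed bonds.
    def toy_decompose(cgr_bonds):
        reactant_bonds = {k: r for k, (r, p) in cgr_bonds.items() if r is not None}
        product_bonds = {k: p for k, (r, p) in cgr_bonds.items() if p is not None}
        return reactant_bonds, product_bonds

    cgr = {frozenset({1, 2}): (1, 2), frozenset({2, 3}): (1, None)}
    print(toy_decompose(cgr))
    # ({frozenset({1, 2}): 1, frozenset({2, 3}): 1}, {frozenset({1, 2}): 2})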
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.cycle_data
python
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None, changelog=True):
    """Get data from JIRA for cycle/flow times and story points size change.

    Build a numerically indexed data frame with the following 'fixed'
    columns: `key`, 'url', 'issue_type', `summary`, `status`, and
    `resolution` from JIRA, as well as the value of any fields set in the
    `fields` dict in `settings`. If `known_values` is set (a dict of lists,
    with field names as keys and a list of known values for each field as
    values) and a field in `fields` contains a list of values, only the
    first value in the list of known values will be used.

    If 'query_attribute' is set in `settings`, a column with this name will
    be added, and populated with the `value` key, if any, from each criteria
    block under `queries` in settings.

    In addition, `cycle_time` will be set to the time delta between the
    first `accepted`-type column and the first `complete` column, or None.

    The remaining columns are the names of the items in the configured
    cycle, in order.

    Each cell contains the last date/time stamp when the relevant status
    was set. If an item moves backwards through the cycle, subsequent
    date/time stamps in the cycle are erased.
    """

    cycle_names = [s['name'] for s in self.settings['cycle']]
    accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted)
    completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete)

    series = {
        'key': {'data': [], 'dtype': str},
        'url': {'data': [], 'dtype': str},
        'issue_type': {'data': [], 'dtype': str},
        'summary': {'data': [], 'dtype': str},
        'status': {'data': [], 'dtype': str},
        'resolution': {'data': [], 'dtype': str},
        'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'},
        'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'},
        'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}
    }

    if sys.platform.startswith('win'):
        # "w+" opens a read/write text file, truncating it if it exists;
        # buffering=1 selects line buffering. Windows users seem to have a
        # problem with the spooled temporary file, hence the plain file here.
        buffer = open("cycledata.tmp", "w+", 1)
    else:
        buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t')
    #issuelinks = open("issuelinks.csv", "w+", 1)
    #df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink', 'LinkType'])
    #df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink', 'LinkType'], header=True, index=None, sep='\t', encoding='utf-8')

    df_size_history = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])
    df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'],
                           header=True, index=None, sep='\t', encoding='utf-8')

    for cycle_name in cycle_names:
        series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'}
    for name in self.fields.keys():
        series[name] = {'data': [], 'dtype': 'object'}
    if self.settings['query_attribute']:
        series[self.settings['query_attribute']] = {'data': [], 'dtype': str}

    for criteria in self.settings['queries']:
        for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog):
            # Deal with the differences in strings between Python 2 & 3
            if sys.version_info > (3, 0):
                # Python 3 code in this block
                item = {
                    'key': issue.key,
                    'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                    'issue_type': issue.fields.issuetype.name,
                    'summary': issue.fields.summary,  # .encode('utf-8'),
                    'status': issue.fields.status.name,
                    'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                    'cycle_time': None,
                    'completed_timestamp': None,
                    'created_timestamp': issue.fields.created[:19]
                }
            else:
                # Python 2 code in this block
                item = {
                    'key': issue.key,
                    'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                    'issue_type': issue.fields.issuetype.name,
                    'summary': issue.fields.summary.encode('utf-8'),
                    'status': issue.fields.status.name,
                    'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                    'cycle_time': None,
                    'completed_timestamp': None,
                    'created_timestamp': issue.fields.created[:19]
                }

            for name, field_name in self.fields.items():
                item[name] = self.resolve_field_value(issue, name, field_name)

            if self.settings['query_attribute']:
                item[self.settings['query_attribute']] = criteria.get('value', None)

            for cycle_name in cycle_names:
                item[cycle_name] = None

            # Get the relationships for this issue
            edges = []  # Source, Target, Inward Link, Outward Link, Type
            issuelinks = issue.fields.issuelinks
            # It seems that having an Epic parent does not record an Epic Link, just the name "Epic Name".
            # Creating the Epic relationship requires more work, and each Jira instance will have
            # different custom fields for Epic data, so this code was removed:
            #issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None  # Epic Link
            #if issueEpic is not None:
            #    data = {'Source': issueEpic, 'Target': issue.key, 'InwardLink': 'Belongs to Epic', 'OutwardLink': 'Issue in Epic', 'LinkType': 'EpicIssue'}
            #    edges.append(data)
            for link in issuelinks:
                inwardissue = None
                outwardissue = None
                try:
                    inwardissue = link.inwardIssue.key
                except:
                    outwardissue = link.outwardIssue.key
                if inwardissue is not None:
                    data = {'LinkID': link.id, 'Source': inwardissue, 'Target': issue.key,
                            'InwardLink': link.type.inward, 'OutwardLink': link.type.outward,
                            'LinkType': link.type.name}
                else:
                    data = {'LinkID': link.id, 'Source': issue.key, 'Target': outwardissue,
                            'InwardLink': link.type.inward, 'OutwardLink': link.type.outward,
                            'LinkType': link.type.name}
                edges.append(data)
            if len(edges) > 0:
                try:
                    df_edges
                except NameError:
                    df_edges = pd.DataFrame(edges)
                else:
                    df_links = pd.DataFrame(edges)
                    df_edges = df_edges.append(df_links)
            # Got all the relationships for this issue

            rows = []
            try:
                for snapshot in self.iter_size_changes(issue):
                    data = {'key': snapshot.key, 'fromDate': snapshot.date, 'size': snapshot.size}
                    rows.append(data)
                df = pd.DataFrame(rows)
                # Create the toDate column
                df_toDate = df['fromDate'].shift(-1)
                df_toDate.loc[len(df_toDate) - 1] = datetime.datetime.now(pytz.utc)
                df['toDate'] = df_toDate
            except:
                df = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])
            # Round datetimes down to full dates
            df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
            df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))

            # If we only have one row of size changes and the current issue has a size, the issue
            # must have been created with a size value, which will not appear in the size-change
            # record. Hence update the single row we have with the current issue size.
            if item.get('StoryPoints') is not None and df.shape[0] == 1:
                df.loc[df.index[0], 'size'] = item['StoryPoints']

            # Append to the csv buffer
            df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None,
                      mode='a', sep='\t', date_format='%Y-%m-%d', encoding='utf-8')

            # If the first column in the item lifecycle was skipped, put the created date in it.
            # (Figure out why the first column does not have the created date.)
            if item[cycle_names[0]] is None:
                item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp'])

            # Record date of status changes
            for snapshot in self.iter_changes(issue, True):
                snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None)
                if snapshot_cycle_step is None:
                    if verbose:
                        print(issue.key, "transitioned to unknown JIRA status", snapshot.status)
                    continue

                snapshot_cycle_step_name = snapshot_cycle_step['name']

                # Keep the first time we entered a step
                if item[snapshot_cycle_step_name] is None:
                    item[snapshot_cycle_step_name] = snapshot.date

                # Wipe any subsequent dates, in case this was a move backwards
                found_cycle_name = False
                for cycle_name in cycle_names:
                    if not found_cycle_name and cycle_name == snapshot_cycle_step_name:
                        found_cycle_name = True
                        continue
                    elif found_cycle_name and item[cycle_name] is not None:
                        if verbose:
                            print(issue.key, "moved backwards to", snapshot_cycle_step_name,
                                  "wiping date for subsequent step", cycle_name)
                        item[cycle_name] = None

            # Wipe timestamps if items have moved backwards; calculate cycle time
            previous_timestamp = None
            accepted_timestamp = None
            completed_timestamp = None
            for cycle_name in cycle_names:
                if item[cycle_name] is not None:
                    previous_timestamp = item[cycle_name]
                    if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps:
                        accepted_timestamp = previous_timestamp
                    if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps:
                        completed_timestamp = previous_timestamp

            if accepted_timestamp is not None and completed_timestamp is not None:
                item['cycle_time'] = completed_timestamp - accepted_timestamp
                item['completed_timestamp'] = completed_timestamp

            for k, v in item.items():
                series[k]['data'].append(v)

    data = {}
    for k, v in series.items():
        data[k] = pd.Series(v['data'], dtype=v['dtype'])
    result_cycle = pd.DataFrame(data,
                                columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] +
                                        sorted(self.fields.keys()) +
                                        ([self.settings['query_attribute']] if self.settings['query_attribute'] else []) +
                                        ['cycle_time', 'completed_timestamp'] +
                                        cycle_names)

    result_size = pd.DataFrame()
    buffer.seek(0)
    result_size = result_size.from_csv(buffer, sep='\t')
    buffer.close()

    try:
        df_edges
    except NameError:
        df_edges = pd.DataFrame()
    try:
        # Specify dataframe column order
        df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink', 'LinkType', 'LinkID']]
        #df_edges.to_csv("myedges.csv", sep='\t', index=False, encoding='utf-8')
    except KeyError:
        print('Info: No issue edges found.')
    result_edges = df_edges

    # There may be no result_size data, as we might not have any change history
    try:
        result_size.set_index('key')
    except KeyError:
        result_size = pd.DataFrame(index=['key'], columns=['fromDate', 'toDate', 'size'])
    result_size['toDate'] = pd.to_datetime(result_size['toDate'], format='%Y-%m-%d')
    result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format='%Y-%m-%d')

    return result_cycle, result_size, result_edges
[ "def", "cycle_data", "(", "self", ",", "verbose", "=", "False", ",", "result_cycle", "=", "None", ",", "result_size", "=", "None", ",", "result_edges", "=", "None", ",", "changelog", "=", "True", ")", ":", "cycle_names", "=", "[", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "]", "accepted_steps", "=", "set", "(", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "if", "s", "[", "'type'", "]", "==", "StatusTypes", ".", "accepted", ")", "completed_steps", "=", "set", "(", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "if", "s", "[", "'type'", "]", "==", "StatusTypes", ".", "complete", ")", "series", "=", "{", "'key'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'url'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'issue_type'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'summary'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'status'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'resolution'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'cycle_time'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'timedelta64[ns]'", "}", ",", "'completed_timestamp'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", ",", "'created_timestamp'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", "}", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "buffer", "=", "open", "(", "\"cycledata.tmp\"", ",", "\"w+\"", ",", "1", ")", "# Opens a file for writing only in binary format. 
Overwrites the file if the file exists.", "# buffering value is 1", "# Windows users seem to have a problem with spooled file", "else", ":", "buffer", "=", "tempfile", ".", "SpooledTemporaryFile", "(", "max_size", "=", "50000", ",", "mode", "=", "'w+t'", ")", "#issuelinks = open(\"issuelinks.csv\", \"w+\", 1)", "#df_edges = pd.DataFrame()", "#df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'])", "#df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'], header=True, index=None, sep='\\t',encoding='utf-8')", "df_size_history", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "df_size_history", ".", "to_csv", "(", "buffer", ",", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ",", "header", "=", "True", ",", "index", "=", "None", ",", "sep", "=", "'\\t'", ",", "encoding", "=", "'utf-8'", ")", "for", "cycle_name", "in", "cycle_names", ":", "series", "[", "cycle_name", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", "for", "name", "in", "self", ".", "fields", ".", "keys", "(", ")", ":", "series", "[", "name", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'object'", "}", "if", "self", ".", "settings", "[", "'query_attribute'", "]", ":", "series", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", "for", "criteria", "in", "self", ".", "settings", "[", "'queries'", "]", ":", "for", "issue", "in", "self", ".", "find_issues", "(", "criteria", ",", "order", "=", "'updatedDate DESC'", ",", "verbose", "=", "verbose", ",", "changelog", "=", "changelog", ")", ":", "# Deal with the differences in strings between Python 2 & 3", "if", "(", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ")", ":", "# Python 3 code in this block", "item", "=", "{", "'key'", ":", "issue", ".", "key", ",", "'url'", ":", "\"%s/browse/%s\"", "%", "(", "self", ".", "jira", ".", "_options", "[", "'server'", "]", ",", "issue", ".", "key", ",", ")", ",", "'issue_type'", ":", "issue", ".", "fields", ".", "issuetype", ".", "name", ",", "'summary'", ":", "issue", ".", "fields", ".", "summary", ",", "# .encode('utf-8'),", "'status'", ":", "issue", ".", "fields", ".", "status", ".", "name", ",", "'resolution'", ":", "issue", ".", "fields", ".", "resolution", ".", "name", "if", "issue", ".", "fields", ".", "resolution", "else", "None", ",", "'cycle_time'", ":", "None", ",", "'completed_timestamp'", ":", "None", ",", "'created_timestamp'", ":", "issue", ".", "fields", ".", "created", "[", ":", "19", "]", "}", "else", ":", "# Python 2 code in this block", "item", "=", "{", "'key'", ":", "issue", ".", "key", ",", "'url'", ":", "\"%s/browse/%s\"", "%", "(", "self", ".", "jira", ".", "_options", "[", "'server'", "]", ",", "issue", ".", "key", ",", ")", ",", "'issue_type'", ":", "issue", ".", "fields", ".", "issuetype", ".", "name", ",", "'summary'", ":", "issue", ".", "fields", ".", "summary", ".", "encode", "(", "'utf-8'", ")", ",", "'status'", ":", "issue", ".", "fields", ".", "status", ".", "name", ",", "'resolution'", ":", "issue", ".", "fields", ".", "resolution", ".", "name", "if", "issue", ".", "fields", ".", "resolution", "else", "None", ",", "'cycle_time'", ":", "None", ",", "'completed_timestamp'", ":", "None", ",", "'created_timestamp'", ":", "issue", ".", "fields", ".", "created", "[", ":", 
"19", "]", "}", "for", "name", ",", "field_name", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "item", "[", "name", "]", "=", "self", ".", "resolve_field_value", "(", "issue", ",", "name", ",", "field_name", ")", "if", "self", ".", "settings", "[", "'query_attribute'", "]", ":", "item", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "=", "criteria", ".", "get", "(", "'value'", ",", "None", ")", "for", "cycle_name", "in", "cycle_names", ":", "item", "[", "cycle_name", "]", "=", "None", "# Get the relationships for this issue", "edges", "=", "[", "]", "# Source, Target, Inward Link, Outward Link, Type", "issuelinks", "=", "issue", ".", "fields", ".", "issuelinks", "# It is seems that having an Epic Parent does not record an Epic Link, just the name \"Epic Name\"", "# Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data", "# Remove this code.", "#issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None # Epic Link", "#if issueEpic is not None:", "# data = {'Source':issueEpic, 'Target':issue.key, 'InwardLink':'Belongs to Epic', 'OutwardLink':'Issue in Epic', 'LinkType':'EpicIssue'}", "# edges.append(data)", "for", "link", "in", "issuelinks", ":", "inwardissue", "=", "None", "outwardissue", "=", "None", "try", ":", "inwardissue", "=", "link", ".", "inwardIssue", ".", "key", "except", ":", "outwardissue", "=", "link", ".", "outwardIssue", ".", "key", "if", "inwardissue", "is", "not", "None", ":", "data", "=", "{", "'LinkID'", ":", "link", ".", "id", ",", "'Source'", ":", "inwardissue", ",", "'Target'", ":", "issue", ".", "key", ",", "'InwardLink'", ":", "link", ".", "type", ".", "inward", ",", "'OutwardLink'", ":", "link", ".", "type", ".", "outward", ",", "'LinkType'", ":", "link", ".", "type", ".", "name", "}", "else", ":", "data", "=", "{", "'LinkID'", ":", "link", ".", "id", ",", "'Source'", ":", "issue", ".", "key", ",", "'Target'", ":", "outwardissue", ",", "'InwardLink'", ":", "link", ".", "type", ".", "inward", ",", "'OutwardLink'", ":", "link", ".", "type", ".", "outward", ",", "'LinkType'", ":", "link", ".", "type", ".", "name", "}", "edges", ".", "append", "(", "data", ")", "if", "len", "(", "edges", ")", ">", "0", ":", "try", ":", "df_edges", "except", "NameError", ":", "#print('Not found')", "df_edges", "=", "pd", ".", "DataFrame", "(", "edges", ")", "else", ":", "df_links", "=", "pd", ".", "DataFrame", "(", "edges", ")", "df_edges", "=", "df_edges", ".", "append", "(", "df_links", ")", "# = pd.DataFrame(edges)", "# Got all the relationships for this issue", "rows", "=", "[", "]", "try", ":", "for", "snapshot", "in", "self", ".", "iter_size_changes", "(", "issue", ")", ":", "data", "=", "{", "'key'", ":", "snapshot", ".", "key", ",", "'fromDate'", ":", "snapshot", ".", "date", ",", "'size'", ":", "snapshot", ".", "size", "}", "rows", ".", "append", "(", "data", ")", "df", "=", "pd", ".", "DataFrame", "(", "rows", ")", "# Create the toDate column", "df_toDate", "=", "df", "[", "'fromDate'", "]", ".", "shift", "(", "-", "1", ")", "df_toDate", ".", "loc", "[", "len", "(", "df_toDate", ")", "-", "1", "]", "=", "datetime", ".", "datetime", ".", "now", "(", "pytz", ".", "utc", ")", "df", "[", "'toDate'", "]", "=", "df_toDate", "except", ":", "df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "# Round Down datetimes to full dates", "df", "[", "'fromDate'", "]", "=", "df", "[", 
"'fromDate'", "]", ".", "apply", "(", "lambda", "dt", ":", "datetime", ".", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", ")", "df", "[", "'toDate'", "]", "=", "df", "[", "'toDate'", "]", ".", "apply", "(", "lambda", "dt", ":", "datetime", ".", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", ")", "# If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation.", "# This size will not be recorded in the size_change record.", "# Hence update the single row we have with the current issue size.", "# Get Story Points size changes history", "#If condition is met update the size cell", "if", "getattr", "(", "item", ",", "'StoryPoints'", ",", "None", ")", "is", "not", "None", "and", "(", "df", ".", "shape", "[", "0", "]", "==", "1", ")", ":", "#if (item['StoryPoints'] is not None ) and (len(df)==1):", "df", ".", "loc", "[", "df", ".", "index", "[", "0", "]", ",", "'size'", "]", "=", "item", "[", "'StoryPoints'", "]", "# Append to csv file", "df", ".", "to_csv", "(", "buffer", ",", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ",", "header", "=", "None", ",", "mode", "=", "'a'", ",", "sep", "=", "'\\t'", ",", "date_format", "=", "'%Y-%m-%d'", ",", "encoding", "=", "'utf-8'", ")", "#print(rows)", "# If the first column in item lifecycle was scipted put the created data in it.", "if", "item", "[", "cycle_names", "[", "0", "]", "]", "is", "None", ":", "item", "[", "cycle_names", "[", "0", "]", "]", "=", "dateutil", ".", "parser", ".", "parse", "(", "item", "[", "'created_timestamp'", "]", ")", "#item['created_timestamp']", "# Figure out why the first Column does not have created date", "#print(dateutil.parser.parse(item['created_timestamp']))", "# Record date of status changes", "for", "snapshot", "in", "self", ".", "iter_changes", "(", "issue", ",", "True", ")", ":", "snapshot_cycle_step", "=", "self", ".", "settings", "[", "'cycle_lookup'", "]", ".", "get", "(", "snapshot", ".", "status", ".", "lower", "(", ")", ",", "None", ")", "if", "snapshot_cycle_step", "is", "None", ":", "if", "verbose", ":", "print", "(", "issue", ".", "key", ",", "\"transitioned to unknown JIRA status\"", ",", "snapshot", ".", "status", ")", "continue", "snapshot_cycle_step_name", "=", "snapshot_cycle_step", "[", "'name'", "]", "# Keep the first time we entered a step", "if", "item", "[", "snapshot_cycle_step_name", "]", "is", "None", ":", "item", "[", "snapshot_cycle_step_name", "]", "=", "snapshot", ".", "date", "# Wipe any subsequent dates, in case this was a move backwards", "found_cycle_name", "=", "False", "for", "cycle_name", "in", "cycle_names", ":", "if", "not", "found_cycle_name", "and", "cycle_name", "==", "snapshot_cycle_step_name", ":", "found_cycle_name", "=", "True", "continue", "elif", "found_cycle_name", "and", "item", "[", "cycle_name", "]", "is", "not", "None", ":", "if", "verbose", ":", "print", "(", "issue", ".", "key", ",", "\"moved backwards to\"", ",", "snapshot_cycle_step_name", ",", "\"wiping date for subsequent step\"", ",", "cycle_name", ")", "item", "[", "cycle_name", "]", "=", "None", "# Wipe timestamps if items have moved backwards; calculate cycle time", "previous_timestamp", "=", "None", "accepted_timestamp", "=", "None", "completed_timestamp", "=", "None", "for", "cycle_name", "in", "cycle_names", ":", "if", "item", "[", "cycle_name", "]", "is", "not", "None", ":", "previous_timestamp", "=", "item", 
"[", "cycle_name", "]", "if", "accepted_timestamp", "is", "None", "and", "previous_timestamp", "is", "not", "None", "and", "cycle_name", "in", "accepted_steps", ":", "accepted_timestamp", "=", "previous_timestamp", "if", "completed_timestamp", "is", "None", "and", "previous_timestamp", "is", "not", "None", "and", "cycle_name", "in", "completed_steps", ":", "completed_timestamp", "=", "previous_timestamp", "if", "accepted_timestamp", "is", "not", "None", "and", "completed_timestamp", "is", "not", "None", ":", "item", "[", "'cycle_time'", "]", "=", "completed_timestamp", "-", "accepted_timestamp", "item", "[", "'completed_timestamp'", "]", "=", "completed_timestamp", "for", "k", ",", "v", "in", "item", ".", "items", "(", ")", ":", "series", "[", "k", "]", "[", "'data'", "]", ".", "append", "(", "v", ")", "data", "=", "{", "}", "for", "k", ",", "v", "in", "series", ".", "items", "(", ")", ":", "data", "[", "k", "]", "=", "pd", ".", "Series", "(", "v", "[", "'data'", "]", ",", "dtype", "=", "v", "[", "'dtype'", "]", ")", "result_cycle", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "[", "'key'", ",", "'url'", ",", "'issue_type'", ",", "'summary'", ",", "'status'", ",", "'resolution'", "]", "+", "sorted", "(", "self", ".", "fields", ".", "keys", "(", ")", ")", "+", "(", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "if", "self", ".", "settings", "[", "'query_attribute'", "]", "else", "[", "]", ")", "+", "[", "'cycle_time'", ",", "'completed_timestamp'", "]", "+", "cycle_names", ")", "result_size", "=", "pd", ".", "DataFrame", "(", ")", "buffer", ".", "seek", "(", "0", ")", "result_size", "=", "result_size", ".", "from_csv", "(", "buffer", ",", "sep", "=", "'\\t'", ")", "buffer", ".", "close", "(", ")", "try", ":", "df_edges", "except", "NameError", ":", "# print('Not found')", "df_edges", "=", "pd", ".", "DataFrame", "(", ")", "try", ":", "df_edges", "=", "df_edges", "[", "[", "'Source'", ",", "'OutwardLink'", ",", "'Target'", ",", "'InwardLink'", ",", "'LinkType'", ",", "'LinkID'", "]", "]", "# Specify dataframe sort order", "#df_edges.to_csv(\"myedges.csv\", sep='\\t', index=False,encoding='utf-8')", "except", "KeyError", ":", "print", "(", "'Info: No issue edges found.'", ")", "result_edges", "=", "df_edges", "# There maybe no result_size data is we might not have any change history", "try", ":", "result_size", ".", "set_index", "(", "'key'", ")", "except", "KeyError", ":", "result_size", "=", "pd", ".", "DataFrame", "(", "index", "=", "[", "'key'", "]", ",", "columns", "=", "[", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "result_size", "[", "'toDate'", "]", "=", "pd", ".", "to_datetime", "(", "result_size", "[", "'toDate'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "result_size", "[", "'fromDate'", "]", "=", "pd", ".", "to_datetime", "(", "result_size", "[", "'fromDate'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "return", "result_cycle", ",", "result_size", ",", "result_edges" ]
Get data from JIRA for cycle/flow times and story point size changes. Build a numerically indexed data frame with the following 'fixed' columns: `key`, `url`, `issue_type`, `summary`, `status`, and `resolution` from JIRA, as well as the value of any fields set in the `fields` dict in `settings`. If `known_values` is set (a dict of lists, with field names as keys and a list of known values for each field as values) and a field in `fields` contains a list of values, only the first value in the list of known values will be used. If `query_attribute` is set in `settings`, a column with this name will be added and populated with the `value` key, if any, from each criteria block under `queries` in settings. In addition, `cycle_time` will be set to the time delta between the first `accepted`-type column and the first `complete`-type column, or None. The remaining columns are the names of the items in the configured cycle, in order. Each cell contains the last date/time stamp at which the relevant status was set. If an item moves backwards through the cycle, subsequent date/time stamps in the cycle are erased.
[ "Get", "data", "from", "JIRA", "for", "cycle", "/", "flow", "times", "and", "story", "points", "size", "change", "." ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L86-L357
train
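The `cycle_data` docstring above describes `cycle_time` as the delta between the first accepted-type timestamp and the first completed-type timestamp. A minimal standalone pandas sketch of that calculation, on invented toy data rather than the library's own query machinery:

import pandas as pd

# Toy frame shaped like the documented output: one row per issue,
# one timestamp column per workflow step (column names are illustrative).
df = pd.DataFrame({
    'key': ['PRJ-1', 'PRJ-2'],
    'accepted': pd.to_datetime(['2020-01-02', '2020-01-05']),
    'completed_timestamp': pd.to_datetime(['2020-01-10', None]),
})
# cycle_time is only defined once both timestamps exist; NaT propagates.
df['cycle_time'] = df['completed_timestamp'] - df['accepted']
print(df[['key', 'cycle_time']])  # PRJ-1: 8 days, PRJ-2: NaT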
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.size_history
def size_history(self,size_data): """Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number. """ def my_merge(df1, df2): # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True) cols = sorted(res.columns) pairs = [] for col1, col2 in zip(cols[:-1], cols[1:]): if col1.endswith('_x') and col2.endswith('_y'): pairs.append((col1, col2)) for col1, col2 in pairs: res[col1[:-2]] = res[col1].combine_first(res[col2]) res = res.drop([col1, col2], axis=1) return res dfs_key = [] # Group the dataframe by regiment, and for each regiment, for name, group in size_data.groupby('key'): dfs = [] for row in group.itertuples(): # print(row.Index, row.fromDate,row.toDate, row.size) dates = pd.date_range(start=row.fromDate, end=row.toDate) sizes = [row.size] * len(dates) data = {'date': dates, 'size': sizes} df2 = pd.DataFrame(data, columns=['date', 'size']) pd.to_datetime(df2['date'], format=('%Y-%m-%d')) df2.set_index(['date'], inplace=True) dfs.append(df2) # df_final = reduce(lambda left,right: pd.merge(left,right), dfs) df_key = (reduce(my_merge, dfs)) df_key.columns = [name if x == 'size' else x for x in df_key.columns] dfs_key.append(df_key) df_all = (reduce(my_merge, dfs_key)) # Sort the columns based on Jira Project code and issue number mykeys = df_all.columns.values.tolist() mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6)) df_all = df_all[mykeys] # Reindex to make sure we have all dates start, end = df_all.index.min(), df_all.index.max() df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill') return df_all
python
def size_history(self,size_data): """Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number. """ def my_merge(df1, df2): # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True) cols = sorted(res.columns) pairs = [] for col1, col2 in zip(cols[:-1], cols[1:]): if col1.endswith('_x') and col2.endswith('_y'): pairs.append((col1, col2)) for col1, col2 in pairs: res[col1[:-2]] = res[col1].combine_first(res[col2]) res = res.drop([col1, col2], axis=1) return res dfs_key = [] # Group the dataframe by regiment, and for each regiment, for name, group in size_data.groupby('key'): dfs = [] for row in group.itertuples(): # print(row.Index, row.fromDate,row.toDate, row.size) dates = pd.date_range(start=row.fromDate, end=row.toDate) sizes = [row.size] * len(dates) data = {'date': dates, 'size': sizes} df2 = pd.DataFrame(data, columns=['date', 'size']) pd.to_datetime(df2['date'], format=('%Y-%m-%d')) df2.set_index(['date'], inplace=True) dfs.append(df2) # df_final = reduce(lambda left,right: pd.merge(left,right), dfs) df_key = (reduce(my_merge, dfs)) df_key.columns = [name if x == 'size' else x for x in df_key.columns] dfs_key.append(df_key) df_all = (reduce(my_merge, dfs_key)) # Sort the columns based on Jira Project code and issue number mykeys = df_all.columns.values.tolist() mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6)) df_all = df_all[mykeys] # Reindex to make sure we have all dates start, end = df_all.index.min(), df_all.index.max() df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill') return df_all
[ "def", "size_history", "(", "self", ",", "size_data", ")", ":", "def", "my_merge", "(", "df1", ",", "df2", ")", ":", "# http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes", "res", "=", "pd", ".", "merge", "(", "df1", ",", "df2", ",", "how", "=", "'outer'", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "cols", "=", "sorted", "(", "res", ".", "columns", ")", "pairs", "=", "[", "]", "for", "col1", ",", "col2", "in", "zip", "(", "cols", "[", ":", "-", "1", "]", ",", "cols", "[", "1", ":", "]", ")", ":", "if", "col1", ".", "endswith", "(", "'_x'", ")", "and", "col2", ".", "endswith", "(", "'_y'", ")", ":", "pairs", ".", "append", "(", "(", "col1", ",", "col2", ")", ")", "for", "col1", ",", "col2", "in", "pairs", ":", "res", "[", "col1", "[", ":", "-", "2", "]", "]", "=", "res", "[", "col1", "]", ".", "combine_first", "(", "res", "[", "col2", "]", ")", "res", "=", "res", ".", "drop", "(", "[", "col1", ",", "col2", "]", ",", "axis", "=", "1", ")", "return", "res", "dfs_key", "=", "[", "]", "# Group the dataframe by regiment, and for each regiment,", "for", "name", ",", "group", "in", "size_data", ".", "groupby", "(", "'key'", ")", ":", "dfs", "=", "[", "]", "for", "row", "in", "group", ".", "itertuples", "(", ")", ":", "# print(row.Index, row.fromDate,row.toDate, row.size)", "dates", "=", "pd", ".", "date_range", "(", "start", "=", "row", ".", "fromDate", ",", "end", "=", "row", ".", "toDate", ")", "sizes", "=", "[", "row", ".", "size", "]", "*", "len", "(", "dates", ")", "data", "=", "{", "'date'", ":", "dates", ",", "'size'", ":", "sizes", "}", "df2", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "[", "'date'", ",", "'size'", "]", ")", "pd", ".", "to_datetime", "(", "df2", "[", "'date'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "df2", ".", "set_index", "(", "[", "'date'", "]", ",", "inplace", "=", "True", ")", "dfs", ".", "append", "(", "df2", ")", "# df_final = reduce(lambda left,right: pd.merge(left,right), dfs)", "df_key", "=", "(", "reduce", "(", "my_merge", ",", "dfs", ")", ")", "df_key", ".", "columns", "=", "[", "name", "if", "x", "==", "'size'", "else", "x", "for", "x", "in", "df_key", ".", "columns", "]", "dfs_key", ".", "append", "(", "df_key", ")", "df_all", "=", "(", "reduce", "(", "my_merge", ",", "dfs_key", ")", ")", "# Sort the columns based on Jira Project code and issue number", "mykeys", "=", "df_all", ".", "columns", ".", "values", ".", "tolist", "(", ")", "mykeys", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "split", "(", "'-'", ")", "[", "0", "]", "+", "'-'", "+", "str", "(", "int", "(", "x", ".", "split", "(", "'-'", ")", "[", "1", "]", ")", ")", ".", "zfill", "(", "6", ")", ")", "df_all", "=", "df_all", "[", "mykeys", "]", "# Reindex to make sure we have all dates", "start", ",", "end", "=", "df_all", ".", "index", ".", "min", "(", ")", ",", "df_all", ".", "index", ".", "max", "(", ")", "df_all", "=", "df_all", ".", "reindex", "(", "pd", ".", "date_range", "(", "start", ",", "end", ",", "freq", "=", "'D'", ")", ",", "method", "=", "'ffill'", ")", "return", "df_all" ]
Return a DataFrame, indexed by day, with columns containing story size for each issue. Columns are sorted by Jira issue key: first by project and then by issue number.
[ "Return", "the", "a", "DataFrame", "indexed", "by", "day", "with", "columns", "containing", "story", "size", "for", "each", "issue", "." ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L359-L407
train
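size_history above expands each (fromDate, toDate, size) interval into a per-day series for its issue and then orders the columns by Jira key. Two of its core idioms, sketched standalone with toy values rather than the library API:

import pandas as pd

# Expand one size-change interval into a daily series for a single issue.
dates = pd.date_range(start='2020-01-01', end='2020-01-03')
daily = pd.DataFrame({'date': dates, 'PRJ-1': [5] * len(dates)}).set_index('date')
print(daily)  # size 5 recorded on each of the three days

# Sort Jira keys by project code, then numerically by issue number,
# using the same zero-padding trick as the source.
keys = ['ABC-10', 'ABC-2', 'XY-1']
keys.sort(key=lambda k: k.split('-')[0] + '-' + str(int(k.split('-')[1])).zfill(6))
print(keys)  # ['ABC-2', 'ABC-10', 'XY-1']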
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.cfd
def cfd(self, cycle_data,size_history= None, pointscolumn= None, stacked = True ): """Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True then return dataframe suitable for plotting as stacked area chart else return for platting as non-staked or line chart. """ # Define helper function def cumulativeColumnStates(df,stacked): """ Calculate the column sums, were the incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value to avoid counting items in prior states. :param df: :return: pandas dataframe row with sum of column items """ # Helper functions to return the right most cells in 2D array def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy if stacked: df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) else: df_result = df_zeroed sum_row = df_result[df.columns].sum() # Sum Columns return pd.DataFrame(data=sum_row).T # Transpose into row dataframe and return # Helper function to return the right most cells in 2D array def keeprightmoststate(df): """ Incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value. :param df: :return: pandas dataframe row with sum of column items """ def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) return df_result # Define helper function def hide_greater_than_date(cell, adate): """ Helper function to compare date values in cells """ result = False try: celldatetime = datetime.date(cell.year, cell.month, cell.day) except: return True if celldatetime > adate: return True return False # We have a date value in cell and it is less than or equal to input date # Helper function def appendDFToCSV(df, csvFilePath, sep="\t",date_format='%Y-%m-%d', encoding='utf-8'): import os if not os.path.isfile(csvFilePath): df.to_csv(csvFilePath, mode='a', index=False, sep=sep, date_format=date_format, encoding=encoding) elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns): raise Exception( "Columns do not match!! Dataframe has " + str(len(df.columns)) + " columns. 
CSV file has " + str( len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.") elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all(): raise Exception("Columns and column order of dataframe and csv file do not match!!") else: df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False, date_format=date_format, encoding=encoding) #print(pointscolumn) # List of all state change columns that may have date value in them cycle_names = [s['name'] for s in self.settings['cycle']] # Create list of columns that we want to return in our results dataFrame slice_columns = list(self.settings['none_sized_statuses']) # Make a COPY of the list so that we dont modify the reference. if pointscolumn: for size_state in self.settings['sized_statuses']: # states_to_size: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) # Check that it works if we use all columns as sized. slice_columns = [] for size_state in cycle_names: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) else: slice_columns = cycle_names # Build a dataframe of just the "date" columns df = cycle_data[cycle_names].copy() # Strip out times from all dates df = pd.DataFrame( np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'), columns=df.columns, index=df.index ) # No history provided this thus we return dataframe with just column headers. if size_history is None: return df # Get a list of dates that a issue changed state state_changes_on_dates_set = set() for state in cycle_names: state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state])) # How many unique days did a issue stage state # Remove non timestamp vlaues and sort the list state_changes_on_dates = filter(lambda x: type(x.date()) == datetime.date, sorted(list(state_changes_on_dates_set))) # Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp df = df.fillna(method='bfill', axis=1) if pointscolumn: storypoints = cycle_data[pointscolumn] # As at today ids = cycle_data['key'] # create blank results dataframe df_results = pd.DataFrame() # For each date on which we had a issue state change we want to count and sum the totals for each of the given states # 'Open','Analysis','Backlog','In Process','Done','Withdrawn' timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S') for date_index,statechangedate in enumerate(state_changes_on_dates): if date_index%10 == 0: # Print out Progress every tenth pass #print("CFD state change {} of {} ".format(date_index,len(state_changes_on_dates))) if type(statechangedate.date()) == datetime.date: # filterdate.year,filterdate.month,filterdate.day filterdate = datetime.date(statechangedate.year, statechangedate.month, statechangedate.day) # statechangedate.datetime() # Apply function to each cell and only make it visible if issue was in state on or after the filter date df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1) if stacked: df_filtered=keeprightmoststate(df_filtered) if pointscolumn and (size_history is not None): # For debug #if filterdate.isoformat() == '2016-11-22': # size_history.loc[filterdate.isoformat()].to_csv("debug-size-history.csv") storypoints_series_on = size_history.loc[filterdate.isoformat()].T df_size_on_day = pd.Series.to_frame(storypoints_series_on) df_size_on_day.columns = [pointscolumn] # Make sure get size data in the same sequence as ids. 
left = pd.Series.to_frame(ids) right = df_size_on_day result = left.join(right, on=['key']) # http://pandas.pydata.org/pandas-docs/stable/merging.html\ df_countable = pd.concat([result, df_filtered], axis=1) # for debuging and analytics append the days state to file df_countable['date'] = filterdate.isoformat() if stacked: file_name = "daily-cfd-stacked-run-at"+ timenowstr + ".csv" else: file_name = "daily-cfd-run-at" + timenowstr + ".csv" appendDFToCSV(df_countable, file_name ) else: df_countable = df_filtered # Because we size issues with Story Points we need to add some additional columns # for each state based on size not just count if pointscolumn: for size_state in self.settings['sized_statuses']: #states_to_size: sizedStateName = size_state + 'Sized' df_countable[sizedStateName] = df_countable.apply( lambda row: (row[pointscolumn] * row[size_state] ), axis=1) # For debugging write dataframe to sheet for current day. #file_name="countable-cfd-for-day-"+ filterdate.isoformat()+timenowstr+".csv" #df_countable.to_csv(file_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_ALL) df_slice = df_countable.loc[:,slice_columns].copy() df_sub_sum = cumulativeColumnStates(df_slice,stacked) final_table = df_sub_sum.rename(index={0: filterdate}) # append to results df_results = df_results.append(final_table) df_results.sort_index(inplace=True) df= df_results # Count number of times each date occurs, preserving column order #df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names] # Fill missing dates with 0 and run a cumulative sum #df = df.fillna(0).cumsum(axis=0) # Reindex to make sure we have all dates start, end = df.index.min(), df.index.max() try: # If we have no change history we will not have any data in the df and will get a ValueError on reindex df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill') except ValueError: pass return df
python
def cfd(self, cycle_data,size_history= None, pointscolumn= None, stacked = True ): """Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True then return dataframe suitable for plotting as stacked area chart else return for platting as non-staked or line chart. """ # Define helper function def cumulativeColumnStates(df,stacked): """ Calculate the column sums, were the incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value to avoid counting items in prior states. :param df: :return: pandas dataframe row with sum of column items """ # Helper functions to return the right most cells in 2D array def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy if stacked: df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) else: df_result = df_zeroed sum_row = df_result[df.columns].sum() # Sum Columns return pd.DataFrame(data=sum_row).T # Transpose into row dataframe and return # Helper function to return the right most cells in 2D array def keeprightmoststate(df): """ Incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value. :param df: :return: pandas dataframe row with sum of column items """ def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) return df_result # Define helper function def hide_greater_than_date(cell, adate): """ Helper function to compare date values in cells """ result = False try: celldatetime = datetime.date(cell.year, cell.month, cell.day) except: return True if celldatetime > adate: return True return False # We have a date value in cell and it is less than or equal to input date # Helper function def appendDFToCSV(df, csvFilePath, sep="\t",date_format='%Y-%m-%d', encoding='utf-8'): import os if not os.path.isfile(csvFilePath): df.to_csv(csvFilePath, mode='a', index=False, sep=sep, date_format=date_format, encoding=encoding) elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns): raise Exception( "Columns do not match!! Dataframe has " + str(len(df.columns)) + " columns. 
CSV file has " + str( len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.") elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all(): raise Exception("Columns and column order of dataframe and csv file do not match!!") else: df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False, date_format=date_format, encoding=encoding) #print(pointscolumn) # List of all state change columns that may have date value in them cycle_names = [s['name'] for s in self.settings['cycle']] # Create list of columns that we want to return in our results dataFrame slice_columns = list(self.settings['none_sized_statuses']) # Make a COPY of the list so that we dont modify the reference. if pointscolumn: for size_state in self.settings['sized_statuses']: # states_to_size: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) # Check that it works if we use all columns as sized. slice_columns = [] for size_state in cycle_names: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) else: slice_columns = cycle_names # Build a dataframe of just the "date" columns df = cycle_data[cycle_names].copy() # Strip out times from all dates df = pd.DataFrame( np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'), columns=df.columns, index=df.index ) # No history provided this thus we return dataframe with just column headers. if size_history is None: return df # Get a list of dates that a issue changed state state_changes_on_dates_set = set() for state in cycle_names: state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state])) # How many unique days did a issue stage state # Remove non timestamp vlaues and sort the list state_changes_on_dates = filter(lambda x: type(x.date()) == datetime.date, sorted(list(state_changes_on_dates_set))) # Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp df = df.fillna(method='bfill', axis=1) if pointscolumn: storypoints = cycle_data[pointscolumn] # As at today ids = cycle_data['key'] # create blank results dataframe df_results = pd.DataFrame() # For each date on which we had a issue state change we want to count and sum the totals for each of the given states # 'Open','Analysis','Backlog','In Process','Done','Withdrawn' timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S') for date_index,statechangedate in enumerate(state_changes_on_dates): if date_index%10 == 0: # Print out Progress every tenth pass #print("CFD state change {} of {} ".format(date_index,len(state_changes_on_dates))) if type(statechangedate.date()) == datetime.date: # filterdate.year,filterdate.month,filterdate.day filterdate = datetime.date(statechangedate.year, statechangedate.month, statechangedate.day) # statechangedate.datetime() # Apply function to each cell and only make it visible if issue was in state on or after the filter date df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1) if stacked: df_filtered=keeprightmoststate(df_filtered) if pointscolumn and (size_history is not None): # For debug #if filterdate.isoformat() == '2016-11-22': # size_history.loc[filterdate.isoformat()].to_csv("debug-size-history.csv") storypoints_series_on = size_history.loc[filterdate.isoformat()].T df_size_on_day = pd.Series.to_frame(storypoints_series_on) df_size_on_day.columns = [pointscolumn] # Make sure get size data in the same sequence as ids. 
left = pd.Series.to_frame(ids) right = df_size_on_day result = left.join(right, on=['key']) # http://pandas.pydata.org/pandas-docs/stable/merging.html\ df_countable = pd.concat([result, df_filtered], axis=1) # for debuging and analytics append the days state to file df_countable['date'] = filterdate.isoformat() if stacked: file_name = "daily-cfd-stacked-run-at"+ timenowstr + ".csv" else: file_name = "daily-cfd-run-at" + timenowstr + ".csv" appendDFToCSV(df_countable, file_name ) else: df_countable = df_filtered # Because we size issues with Story Points we need to add some additional columns # for each state based on size not just count if pointscolumn: for size_state in self.settings['sized_statuses']: #states_to_size: sizedStateName = size_state + 'Sized' df_countable[sizedStateName] = df_countable.apply( lambda row: (row[pointscolumn] * row[size_state] ), axis=1) # For debugging write dataframe to sheet for current day. #file_name="countable-cfd-for-day-"+ filterdate.isoformat()+timenowstr+".csv" #df_countable.to_csv(file_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_ALL) df_slice = df_countable.loc[:,slice_columns].copy() df_sub_sum = cumulativeColumnStates(df_slice,stacked) final_table = df_sub_sum.rename(index={0: filterdate}) # append to results df_results = df_results.append(final_table) df_results.sort_index(inplace=True) df= df_results # Count number of times each date occurs, preserving column order #df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names] # Fill missing dates with 0 and run a cumulative sum #df = df.fillna(0).cumsum(axis=0) # Reindex to make sure we have all dates start, end = df.index.min(), df.index.max() try: # If we have no change history we will not have any data in the df and will get a ValueError on reindex df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill') except ValueError: pass return df
[ "def", "cfd", "(", "self", ",", "cycle_data", ",", "size_history", "=", "None", ",", "pointscolumn", "=", "None", ",", "stacked", "=", "True", ")", ":", "# Define helper function", "def", "cumulativeColumnStates", "(", "df", ",", "stacked", ")", ":", "\"\"\"\n Calculate the column sums, were the incoming matrix columns represents items in workflow states\n States progress from left to right.\n We what to zero out items, other than right most value to avoid counting items in prior states.\n :param df:\n :return: pandas dataframe row with sum of column items\n \"\"\"", "# Helper functions to return the right most cells in 2D array", "def", "last_number", "(", "lst", ")", ":", "if", "all", "(", "map", "(", "lambda", "x", ":", "x", "==", "0", ",", "lst", ")", ")", ":", "return", "0", "elif", "lst", "[", "-", "1", "]", "!=", "0", ":", "return", "len", "(", "lst", ")", "-", "1", "else", ":", "return", "last_number", "(", "lst", "[", ":", "-", "1", "]", ")", "def", "fill_others", "(", "lst", ")", ":", "new_lst", "=", "[", "0", "]", "*", "len", "(", "lst", ")", "new_lst", "[", "last_number", "(", "lst", ")", "]", "=", "lst", "[", "last_number", "(", "lst", ")", "]", "return", "new_lst", "df_zeroed", "=", "df", ".", "fillna", "(", "value", "=", "0", ")", "# ,inplace = True Get rid of non numeric items. Make a ?deep? copy", "if", "stacked", ":", "df_result", "=", "df_zeroed", ".", "apply", "(", "lambda", "x", ":", "fill_others", "(", "x", ".", "values", ".", "tolist", "(", ")", ")", ",", "axis", "=", "1", ")", "else", ":", "df_result", "=", "df_zeroed", "sum_row", "=", "df_result", "[", "df", ".", "columns", "]", ".", "sum", "(", ")", "# Sum Columns", "return", "pd", ".", "DataFrame", "(", "data", "=", "sum_row", ")", ".", "T", "# Transpose into row dataframe and return", "# Helper function to return the right most cells in 2D array", "def", "keeprightmoststate", "(", "df", ")", ":", "\"\"\"\n Incoming matrix columns represents items in workflow states\n States progress from left to right.\n We what to zero out items, other than right most value.\n :param df:\n :return: pandas dataframe row with sum of column items\n \"\"\"", "def", "last_number", "(", "lst", ")", ":", "if", "all", "(", "map", "(", "lambda", "x", ":", "x", "==", "0", ",", "lst", ")", ")", ":", "return", "0", "elif", "lst", "[", "-", "1", "]", "!=", "0", ":", "return", "len", "(", "lst", ")", "-", "1", "else", ":", "return", "last_number", "(", "lst", "[", ":", "-", "1", "]", ")", "def", "fill_others", "(", "lst", ")", ":", "new_lst", "=", "[", "0", "]", "*", "len", "(", "lst", ")", "new_lst", "[", "last_number", "(", "lst", ")", "]", "=", "lst", "[", "last_number", "(", "lst", ")", "]", "return", "new_lst", "df_zeroed", "=", "df", ".", "fillna", "(", "value", "=", "0", ")", "# ,inplace = True Get rid of non numeric items. Make a ?deep? 
copy", "df_result", "=", "df_zeroed", ".", "apply", "(", "lambda", "x", ":", "fill_others", "(", "x", ".", "values", ".", "tolist", "(", ")", ")", ",", "axis", "=", "1", ")", "return", "df_result", "# Define helper function", "def", "hide_greater_than_date", "(", "cell", ",", "adate", ")", ":", "\"\"\" Helper function to compare date values in cells\n \"\"\"", "result", "=", "False", "try", ":", "celldatetime", "=", "datetime", ".", "date", "(", "cell", ".", "year", ",", "cell", ".", "month", ",", "cell", ".", "day", ")", "except", ":", "return", "True", "if", "celldatetime", ">", "adate", ":", "return", "True", "return", "False", "# We have a date value in cell and it is less than or equal to input date", "# Helper function", "def", "appendDFToCSV", "(", "df", ",", "csvFilePath", ",", "sep", "=", "\"\\t\"", ",", "date_format", "=", "'%Y-%m-%d'", ",", "encoding", "=", "'utf-8'", ")", ":", "import", "os", "if", "not", "os", ".", "path", ".", "isfile", "(", "csvFilePath", ")", ":", "df", ".", "to_csv", "(", "csvFilePath", ",", "mode", "=", "'a'", ",", "index", "=", "False", ",", "sep", "=", "sep", ",", "date_format", "=", "date_format", ",", "encoding", "=", "encoding", ")", "elif", "len", "(", "df", ".", "columns", ")", "!=", "len", "(", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ":", "raise", "Exception", "(", "\"Columns do not match!! Dataframe has \"", "+", "str", "(", "len", "(", "df", ".", "columns", ")", ")", "+", "\" columns. CSV file has \"", "+", "str", "(", "len", "(", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ")", "+", "\" columns.\"", ")", "elif", "not", "(", "df", ".", "columns", "==", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ".", "all", "(", ")", ":", "raise", "Exception", "(", "\"Columns and column order of dataframe and csv file do not match!!\"", ")", "else", ":", "df", ".", "to_csv", "(", "csvFilePath", ",", "mode", "=", "'a'", ",", "index", "=", "False", ",", "sep", "=", "sep", ",", "header", "=", "False", ",", "date_format", "=", "date_format", ",", "encoding", "=", "encoding", ")", "#print(pointscolumn)", "# List of all state change columns that may have date value in them", "cycle_names", "=", "[", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "]", "# Create list of columns that we want to return in our results dataFrame", "slice_columns", "=", "list", "(", "self", ".", "settings", "[", "'none_sized_statuses'", "]", ")", "# Make a COPY of the list so that we dont modify the reference.", "if", "pointscolumn", ":", "for", "size_state", "in", "self", ".", "settings", "[", "'sized_statuses'", "]", ":", "# states_to_size:", "sizedStateName", "=", "size_state", "+", "'Sized'", "slice_columns", ".", "append", "(", "sizedStateName", ")", "# Check that it works if we use all columns as sized.", "slice_columns", "=", "[", "]", "for", "size_state", "in", "cycle_names", ":", "sizedStateName", "=", "size_state", "+", "'Sized'", "slice_columns", ".", "append", "(", "sizedStateName", ")", "else", ":", "slice_columns", "=", "cycle_names", "# Build a dataframe of just the \"date\" columns", "df", "=", "cycle_data", "[", "cycle_names", "]", ".", "copy", "(", ")", "# Strip out times from all dates", "df", "=", "pd", ".", "DataFrame", "(", "np", ".", "array", "(", "df", ".", "values", ",", "dtype", "=", "'<M8[ns]'", ")", ".", 
"astype", "(", "'<M8[D]'", ")", ".", "astype", "(", "'<M8[ns]'", ")", ",", "columns", "=", "df", ".", "columns", ",", "index", "=", "df", ".", "index", ")", "# No history provided this thus we return dataframe with just column headers.", "if", "size_history", "is", "None", ":", "return", "df", "# Get a list of dates that a issue changed state", "state_changes_on_dates_set", "=", "set", "(", ")", "for", "state", "in", "cycle_names", ":", "state_changes_on_dates_set", "=", "state_changes_on_dates_set", ".", "union", "(", "set", "(", "df", "[", "state", "]", ")", ")", "# How many unique days did a issue stage state", "# Remove non timestamp vlaues and sort the list", "state_changes_on_dates", "=", "filter", "(", "lambda", "x", ":", "type", "(", "x", ".", "date", "(", ")", ")", "==", "datetime", ".", "date", ",", "sorted", "(", "list", "(", "state_changes_on_dates_set", ")", ")", ")", "# Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp", "df", "=", "df", ".", "fillna", "(", "method", "=", "'bfill'", ",", "axis", "=", "1", ")", "if", "pointscolumn", ":", "storypoints", "=", "cycle_data", "[", "pointscolumn", "]", "# As at today", "ids", "=", "cycle_data", "[", "'key'", "]", "# create blank results dataframe", "df_results", "=", "pd", ".", "DataFrame", "(", ")", "# For each date on which we had a issue state change we want to count and sum the totals for each of the given states", "# 'Open','Analysis','Backlog','In Process','Done','Withdrawn'", "timenowstr", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'-run-%Y-%m-%d_%H-%M-%S'", ")", "for", "date_index", ",", "statechangedate", "in", "enumerate", "(", "state_changes_on_dates", ")", ":", "if", "date_index", "%", "10", "==", "0", ":", "# Print out Progress every tenth", "pass", "#print(\"CFD state change {} of {} \".format(date_index,len(state_changes_on_dates)))", "if", "type", "(", "statechangedate", ".", "date", "(", ")", ")", "==", "datetime", ".", "date", ":", "# filterdate.year,filterdate.month,filterdate.day", "filterdate", "=", "datetime", ".", "date", "(", "statechangedate", ".", "year", ",", "statechangedate", ".", "month", ",", "statechangedate", ".", "day", ")", "# statechangedate.datetime()", "# Apply function to each cell and only make it visible if issue was in state on or after the filter date", "df_filtered", "=", "df", ".", "applymap", "(", "lambda", "x", ":", "0", "if", "hide_greater_than_date", "(", "x", ",", "filterdate", ")", "else", "1", ")", "if", "stacked", ":", "df_filtered", "=", "keeprightmoststate", "(", "df_filtered", ")", "if", "pointscolumn", "and", "(", "size_history", "is", "not", "None", ")", ":", "# For debug", "#if filterdate.isoformat() == '2016-11-22':", "# size_history.loc[filterdate.isoformat()].to_csv(\"debug-size-history.csv\")", "storypoints_series_on", "=", "size_history", ".", "loc", "[", "filterdate", ".", "isoformat", "(", ")", "]", ".", "T", "df_size_on_day", "=", "pd", ".", "Series", ".", "to_frame", "(", "storypoints_series_on", ")", "df_size_on_day", ".", "columns", "=", "[", "pointscolumn", "]", "# Make sure get size data in the same sequence as ids.", "left", "=", "pd", ".", "Series", ".", "to_frame", "(", "ids", ")", "right", "=", "df_size_on_day", "result", "=", "left", ".", "join", "(", "right", ",", "on", "=", "[", "'key'", "]", ")", "# http://pandas.pydata.org/pandas-docs/stable/merging.html\\", "df_countable", "=", "pd", ".", "concat", "(", "[", "result", ",", "df_filtered", "]", ",", "axis", "=", "1", ")", "# for 
debuging and analytics append the days state to file", "df_countable", "[", "'date'", "]", "=", "filterdate", ".", "isoformat", "(", ")", "if", "stacked", ":", "file_name", "=", "\"daily-cfd-stacked-run-at\"", "+", "timenowstr", "+", "\".csv\"", "else", ":", "file_name", "=", "\"daily-cfd-run-at\"", "+", "timenowstr", "+", "\".csv\"", "appendDFToCSV", "(", "df_countable", ",", "file_name", ")", "else", ":", "df_countable", "=", "df_filtered", "# Because we size issues with Story Points we need to add some additional columns", "# for each state based on size not just count", "if", "pointscolumn", ":", "for", "size_state", "in", "self", ".", "settings", "[", "'sized_statuses'", "]", ":", "#states_to_size:", "sizedStateName", "=", "size_state", "+", "'Sized'", "df_countable", "[", "sizedStateName", "]", "=", "df_countable", ".", "apply", "(", "lambda", "row", ":", "(", "row", "[", "pointscolumn", "]", "*", "row", "[", "size_state", "]", ")", ",", "axis", "=", "1", ")", "# For debugging write dataframe to sheet for current day.", "#file_name=\"countable-cfd-for-day-\"+ filterdate.isoformat()+timenowstr+\".csv\"", "#df_countable.to_csv(file_name, sep='\\t', encoding='utf-8', quoting=csv.QUOTE_ALL)", "df_slice", "=", "df_countable", ".", "loc", "[", ":", ",", "slice_columns", "]", ".", "copy", "(", ")", "df_sub_sum", "=", "cumulativeColumnStates", "(", "df_slice", ",", "stacked", ")", "final_table", "=", "df_sub_sum", ".", "rename", "(", "index", "=", "{", "0", ":", "filterdate", "}", ")", "# append to results", "df_results", "=", "df_results", ".", "append", "(", "final_table", ")", "df_results", ".", "sort_index", "(", "inplace", "=", "True", ")", "df", "=", "df_results", "# Count number of times each date occurs, preserving column order", "#df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names]", "# Fill missing dates with 0 and run a cumulative sum", "#df = df.fillna(0).cumsum(axis=0)", "# Reindex to make sure we have all dates", "start", ",", "end", "=", "df", ".", "index", ".", "min", "(", ")", ",", "df", ".", "index", ".", "max", "(", ")", "try", ":", "# If we have no change history we will not have any data in the df and will get a ValueError on reindex", "df", "=", "df", ".", "reindex", "(", "pd", ".", "date_range", "(", "start", ",", "end", ",", "freq", "=", "'D'", ")", ",", "method", "=", "'ffill'", ")", "except", "ValueError", ":", "pass", "return", "df" ]
Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True, return a dataframe suitable for plotting as a stacked area chart; otherwise return one suitable for plotting as a non-stacked or line chart.
[ "Return", "the", "data", "to", "build", "a", "cumulative", "flow", "diagram", ":", "a", "DataFrame", "indexed", "by", "day", "with", "columns", "containing", "cumulative", "counts", "for", "each", "of", "the", "items", "in", "the", "configured", "cycle", "." ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L410-L641
train
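The stacked path in cfd above hinges on a keep-rightmost-state transform: in a row of per-state 0/1 flags ordered left to right through the workflow, everything but the rightmost non-zero entry is zeroed so an item is counted only in its latest state. The helper pair from the source, runnable standalone on a toy row:

def last_number(lst):
    # Index of the rightmost non-zero entry; 0 if the row is all zeros.
    if all(x == 0 for x in lst):
        return 0
    elif lst[-1] != 0:
        return len(lst) - 1
    else:
        return last_number(lst[:-1])

def fill_others(lst):
    # Keep only the rightmost non-zero value, zero out everything else.
    new_lst = [0] * len(lst)
    new_lst[last_number(lst)] = lst[last_number(lst)]
    return new_lst

print(fill_others([1, 1, 0]))  # [0, 1, 0]: counted only in the latest state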
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.histogram
def histogram(self, cycle_data, bins=10): """Return histogram data for the cycle times in `cycle_data`. Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays """ values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins) index = [] for i, v in enumerate(edges): if i == 0: continue index.append("%.01f to %.01f" % (edges[i - 1], edges[i],)) return pd.Series(values, name="Items", index=index)
python
def histogram(self, cycle_data, bins=10): """Return histogram data for the cycle times in `cycle_data`. Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays """ values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins) index = [] for i, v in enumerate(edges): if i == 0: continue index.append("%.01f to %.01f" % (edges[i - 1], edges[i],)) return pd.Series(values, name="Items", index=index)
[ "def", "histogram", "(", "self", ",", "cycle_data", ",", "bins", "=", "10", ")", ":", "values", ",", "edges", "=", "np", ".", "histogram", "(", "cycle_data", "[", "'cycle_time'", "]", ".", "astype", "(", "'timedelta64[D]'", ")", ".", "dropna", "(", ")", ",", "bins", "=", "bins", ")", "index", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "edges", ")", ":", "if", "i", "==", "0", ":", "continue", "index", ".", "append", "(", "\"%.01f to %.01f\"", "%", "(", "edges", "[", "i", "-", "1", "]", ",", "edges", "[", "i", "]", ",", ")", ")", "return", "pd", ".", "Series", "(", "values", ",", "name", "=", "\"Items\"", ",", "index", "=", "index", ")" ]
Return histogram data for the cycle times in `cycle_data`. Returns a pandas Series of bin counts named "Items", indexed by bin range labels
[ "Return", "histogram", "data", "for", "the", "cycle", "times", "in", "cycle_data", ".", "Returns", "a", "dictionary", "with", "keys", "bin_values", "and", "bin_edges", "of", "numpy", "arrays" ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L644-L656
train
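histogram above bins cycle times with numpy and labels each bin with a readable range. The same binning, standalone on toy day counts:

import numpy as np
import pandas as pd

days = [1.0, 2.0, 5.0, 8.0]  # toy cycle times, already in whole days
values, edges = np.histogram(days, bins=3)
labels = ['%.01f to %.01f' % (edges[i - 1], edges[i]) for i in range(1, len(edges))]
print(pd.Series(values, name='Items', index=labels))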
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.throughput_data
def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None): """Return a data frame with columns `completed_timestamp` of the given frequency, either `count`, where count is the number of items 'sum', where sum is the sum of value specified by pointscolumn. Expected to be 'StoryPoints' completed at that timestamp (e.g. daily). """ if len(cycle_data)<1: return None # Note completed items yet, return None if pointscolumn: return cycle_data[['completed_timestamp', pointscolumn]] \ .rename(columns={pointscolumn: 'sum'}) \ .groupby('completed_timestamp').sum() \ .resample(frequency).sum() \ .fillna(0) else: return cycle_data[['completed_timestamp', 'key']] \ .rename(columns={'key': 'count'}) \ .groupby('completed_timestamp').count() \ .resample(frequency).sum() \ .fillna(0)
python
def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None): """Return a data frame with columns `completed_timestamp` of the given frequency, either `count`, where count is the number of items 'sum', where sum is the sum of value specified by pointscolumn. Expected to be 'StoryPoints' completed at that timestamp (e.g. daily). """ if len(cycle_data)<1: return None # Note completed items yet, return None if pointscolumn: return cycle_data[['completed_timestamp', pointscolumn]] \ .rename(columns={pointscolumn: 'sum'}) \ .groupby('completed_timestamp').sum() \ .resample(frequency).sum() \ .fillna(0) else: return cycle_data[['completed_timestamp', 'key']] \ .rename(columns={'key': 'count'}) \ .groupby('completed_timestamp').count() \ .resample(frequency).sum() \ .fillna(0)
[ "def", "throughput_data", "(", "self", ",", "cycle_data", ",", "frequency", "=", "'1D'", ",", "pointscolumn", "=", "None", ")", ":", "if", "len", "(", "cycle_data", ")", "<", "1", ":", "return", "None", "# Note completed items yet, return None", "if", "pointscolumn", ":", "return", "cycle_data", "[", "[", "'completed_timestamp'", ",", "pointscolumn", "]", "]", ".", "rename", "(", "columns", "=", "{", "pointscolumn", ":", "'sum'", "}", ")", ".", "groupby", "(", "'completed_timestamp'", ")", ".", "sum", "(", ")", ".", "resample", "(", "frequency", ")", ".", "sum", "(", ")", ".", "fillna", "(", "0", ")", "else", ":", "return", "cycle_data", "[", "[", "'completed_timestamp'", ",", "'key'", "]", "]", ".", "rename", "(", "columns", "=", "{", "'key'", ":", "'count'", "}", ")", ".", "groupby", "(", "'completed_timestamp'", ")", ".", "count", "(", ")", ".", "resample", "(", "frequency", ")", ".", "sum", "(", ")", ".", "fillna", "(", "0", ")" ]
Return a data frame indexed by `completed_timestamp` at the given frequency (e.g. daily), with either a `count` column, where count is the number of items completed at that timestamp, or a `sum` column, where sum is the sum of the values in `pointscolumn` (expected to be 'StoryPoints') for items completed at that timestamp.
[ "Return", "a", "data", "frame", "with", "columns", "completed_timestamp", "of", "the", "given", "frequency", "either", "count", "where", "count", "is", "the", "number", "of", "items", "sum", "where", "sum", "is", "the", "sum", "of", "value", "specified", "by", "pointscolumn", ".", "Expected", "to", "be", "StoryPoints", "completed", "at", "that", "timestamp", "(", "e", ".", "g", ".", "daily", ")", "." ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L658-L679
train
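throughput_data above is a groupby-then-resample pipeline. A standalone sketch of its count branch on toy completions, at the default daily frequency:

import pandas as pd

df = pd.DataFrame({
    'completed_timestamp': pd.to_datetime(['2020-01-01', '2020-01-01', '2020-01-03']),
    'key': ['A-1', 'A-2', 'A-3'],
})
out = (df.rename(columns={'key': 'count'})
         .groupby('completed_timestamp').count()  # items finished per timestamp
         .resample('1D').sum()                    # insert the empty Jan 2nd
         .fillna(0))
print(out)  # counts of 2, 0, 1 on the three days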
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.scatterplot
def scatterplot(self, cycle_data): """Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`. """ columns = list(cycle_data.columns) columns.remove('cycle_time') columns.remove('completed_timestamp') columns = ['completed_timestamp', 'cycle_time'] + columns data = ( cycle_data[columns] .dropna(subset=['cycle_time', 'completed_timestamp']) .rename(columns={'completed_timestamp': 'completed_date'}) ) data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]') data['completed_date'] = data['completed_date'].map(pd.Timestamp.date) return data
python
def scatterplot(self, cycle_data): """Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`. """ columns = list(cycle_data.columns) columns.remove('cycle_time') columns.remove('completed_timestamp') columns = ['completed_timestamp', 'cycle_time'] + columns data = ( cycle_data[columns] .dropna(subset=['cycle_time', 'completed_timestamp']) .rename(columns={'completed_timestamp': 'completed_date'}) ) data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]') data['completed_date'] = data['completed_date'].map(pd.Timestamp.date) return data
[ "def", "scatterplot", "(", "self", ",", "cycle_data", ")", ":", "columns", "=", "list", "(", "cycle_data", ".", "columns", ")", "columns", ".", "remove", "(", "'cycle_time'", ")", "columns", ".", "remove", "(", "'completed_timestamp'", ")", "columns", "=", "[", "'completed_timestamp'", ",", "'cycle_time'", "]", "+", "columns", "data", "=", "(", "cycle_data", "[", "columns", "]", ".", "dropna", "(", "subset", "=", "[", "'cycle_time'", ",", "'completed_timestamp'", "]", ")", ".", "rename", "(", "columns", "=", "{", "'completed_timestamp'", ":", "'completed_date'", "}", ")", ")", "data", "[", "'cycle_time'", "]", "=", "data", "[", "'cycle_time'", "]", ".", "astype", "(", "'timedelta64[D]'", ")", "data", "[", "'completed_date'", "]", "=", "data", "[", "'completed_date'", "]", ".", "map", "(", "pd", ".", "Timestamp", ".", "date", ")", "return", "data" ]
Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`.
[ "Return", "scatterplot", "data", "for", "the", "cycle", "times", "in", "cycle_data", ".", "Returns", "a", "data", "frame", "containing", "only", "those", "items", "in", "cycle_data", "where", "values", "are", "set", "for", "completed_timestamp", "and", "cycle_time", "and", "with", "those", "two", "columns", "as", "the", "first", "two", "both", "normalised", "to", "whole", "days", "and", "with", "completed_timestamp", "renamed", "to", "completed_date", "." ]
56443211b3e1200f3def79173a21e0232332ae17
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L681-L703
train
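scatterplot above normalises a timedelta column to whole days and a timestamp column to calendar dates. A small standalone illustration of both conversions; the .dt.days accessor used here is the portable equivalent of the source's astype('timedelta64[D]') cast:

import pandas as pd

cycle_time = pd.Series(pd.to_timedelta(['3 days 07:00:00', '1 days 02:00:00']))
completed = pd.Series(pd.to_datetime(['2020-01-10 15:30', '2020-01-04 09:00']))

print(cycle_time.dt.days)                # 3 and 1: whole days only
print(completed.map(pd.Timestamp.date))  # 2020-01-10 and 2020-01-04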
deep-compute/logagg
logagg/nsqsender.py
NSQSender._is_ready
def _is_ready(self, topic_name): ''' Is NSQ running and have space to receive messages? ''' url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name) #Cheacking for ephmeral channels if '#' in topic_name: topic_name, tag =topic_name.split("#", 1) try: data = self.session.get(url).json() ''' data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \ u'health': u'OK', u'topics': [{u'message_count': 19019, \ u'paused': False, u'topic_name': u'test_topic', u'channels': [], \ u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \ u'percentiles': None}}]} ''' topics = data.get('topics', []) topics = [t for t in topics if t['topic_name'] == topic_name] if not topics: raise Exception('topic_missing_at_nsq') topic = topics[0] depth = topic['depth'] depth += sum(c.get('depth', 0) for c in topic['channels']) self.log.debug('nsq_depth_check', topic=topic_name, depth=depth, max_depth=self.nsq_max_depth) if depth < self.nsq_max_depth: return else: raise Exception('nsq_is_full_waiting_to_clear') except: raise
python
def _is_ready(self, topic_name): ''' Is NSQ running and have space to receive messages? ''' url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name) #Cheacking for ephmeral channels if '#' in topic_name: topic_name, tag =topic_name.split("#", 1) try: data = self.session.get(url).json() ''' data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \ u'health': u'OK', u'topics': [{u'message_count': 19019, \ u'paused': False, u'topic_name': u'test_topic', u'channels': [], \ u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \ u'percentiles': None}}]} ''' topics = data.get('topics', []) topics = [t for t in topics if t['topic_name'] == topic_name] if not topics: raise Exception('topic_missing_at_nsq') topic = topics[0] depth = topic['depth'] depth += sum(c.get('depth', 0) for c in topic['channels']) self.log.debug('nsq_depth_check', topic=topic_name, depth=depth, max_depth=self.nsq_max_depth) if depth < self.nsq_max_depth: return else: raise Exception('nsq_is_full_waiting_to_clear') except: raise
[ "def", "_is_ready", "(", "self", ",", "topic_name", ")", ":", "url", "=", "'http://%s/stats?format=json&topic=%s'", "%", "(", "self", ".", "nsqd_http_address", ",", "topic_name", ")", "#Cheacking for ephmeral channels", "if", "'#'", "in", "topic_name", ":", "topic_name", ",", "tag", "=", "topic_name", ".", "split", "(", "\"#\"", ",", "1", ")", "try", ":", "data", "=", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")", "'''\n data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \\\n u'health': u'OK', u'topics': [{u'message_count': 19019, \\\n u'paused': False, u'topic_name': u'test_topic', u'channels': [], \\\n u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \\\n u'percentiles': None}}]}\n '''", "topics", "=", "data", ".", "get", "(", "'topics'", ",", "[", "]", ")", "topics", "=", "[", "t", "for", "t", "in", "topics", "if", "t", "[", "'topic_name'", "]", "==", "topic_name", "]", "if", "not", "topics", ":", "raise", "Exception", "(", "'topic_missing_at_nsq'", ")", "topic", "=", "topics", "[", "0", "]", "depth", "=", "topic", "[", "'depth'", "]", "depth", "+=", "sum", "(", "c", ".", "get", "(", "'depth'", ",", "0", ")", "for", "c", "in", "topic", "[", "'channels'", "]", ")", "self", ".", "log", ".", "debug", "(", "'nsq_depth_check'", ",", "topic", "=", "topic_name", ",", "depth", "=", "depth", ",", "max_depth", "=", "self", ".", "nsq_max_depth", ")", "if", "depth", "<", "self", ".", "nsq_max_depth", ":", "return", "else", ":", "raise", "Exception", "(", "'nsq_is_full_waiting_to_clear'", ")", "except", ":", "raise" ]
Is NSQ running, and does it have space to receive messages?
[ "Is", "NSQ", "running", "and", "have", "space", "to", "receive", "messages?" ]
7863bc1b5ddf3e67c4d4b55746799304180589a0
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/nsqsender.py#L39-L74
train
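_is_ready above measures a topic's effective backlog as its own depth plus every channel's depth, read from nsqd's /stats JSON. The depth arithmetic, standalone on a payload shaped like the sample embedded in the source's own comment:

stats = {'topics': [{'topic_name': 'test_topic', 'depth': 19019,
                     'backend_depth': 9019,
                     'channels': [{'depth': 5}, {}]}]}  # second channel lacks a depth key

topics = [t for t in stats.get('topics', []) if t['topic_name'] == 'test_topic']
topic = topics[0]
depth = topic['depth'] + sum(c.get('depth', 0) for c in topic['channels'])
print(depth)  # 19024, compared against nsq_max_depth before publishing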
cimm-kzn/CGRtools
CGRtools/containers/query.py
QueryContainer._matcher
def _matcher(self, other): """ QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, MoleculeContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, (QueryContainer, QueryCGRContainer)): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only query-molecule, query-query or query-cgr_query possible')
python
def _matcher(self, other): """ QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, MoleculeContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, (QueryContainer, QueryCGRContainer)): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only query-molecule, query-query or query-cgr_query possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "MoleculeContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ")", "elif", "isinstance", "(", "other", ",", "(", "QueryContainer", ",", "QueryCGRContainer", ")", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only query-molecule, query-query or query-cgr_query possible'", ")" ]
QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general]
[ "QueryContainer", "<", "MoleculeContainer", "QueryContainer", "<", "QueryContainer", "[", "more", "general", "]", "QueryContainer", "<", "QueryCGRContainer", "[", "more", "general", "]" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/query.py#L31-L41
train
cimm-kzn/CGRtools
CGRtools/containers/query.py
QueryCGRContainer._matcher
def _matcher(self, other): """ QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, QueryCGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
python
def _matcher(self, other): """ QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, QueryCGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ")", "elif", "isinstance", "(", "other", ",", "QueryCGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only cgr_query-cgr or cgr_query-cgr_query possible'", ")" ]
QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general]
[ "QueryCGRContainer", "<", "CGRContainer", "QueryContainer", "<", "QueryCGRContainer", "[", "more", "general", "]" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/query.py#L48-L57
train
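Both _matcher records above flip the comparison direction (y == x versus x == y) so that the more general container's __eq__ drives node and edge matching. A toy illustration of why operand order matters when the two sides define __eq__ with different strictness; these classes are invented for illustration and are not CGRtools types:

class QueryAtom:
    def __init__(self, symbols):
        self.symbols = set(symbols)
    def __eq__(self, other):
        # a query atom matches any of its allowed element symbols
        return getattr(other, 'symbol', None) in self.symbols

class Atom:
    def __init__(self, symbol):
        self.symbol = symbol
    def __eq__(self, other):
        # a concrete atom only equals something carrying the same symbol
        return getattr(other, 'symbol', None) == self.symbol

q, a = QueryAtom({'C', 'N'}), Atom('C')
print(q == a)  # True: the query's permissive __eq__ is consulted
print(a == q)  # False: the concrete atom's strict __eq__ is consulted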
cimm-kzn/CGRtools
CGRtools/algorithms/calculate2d.py
Calculate2D.calculate2d
def calculate2d(self, force=False, scale=1): """ recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms """ dist = {} # length forces for n, m_bond in self._adj.items(): dist[n] = {} for m in m_bond: dist[n][m] = .825 # angle forces for n, m_bond in self._adj.items(): if len(m_bond) == 2: # single-single or single-double bonds has angle = 120, other 180 (m1, b1), (m2, b2) = m_bond.items() dist[m1][m2] = dist[m2][m1] = 1.43 if b1.order + b2.order in (2, 3) else 1.7 # +.05 elif len(m_bond) == 3: m1, m2, m3 = m_bond dist[m1][m2] = dist[m1][m3] = dist[m2][m3] = dist[m3][m2] = dist[m2][m1] = dist[m3][m1] = 1.43 elif len(m_bond) == 4: # 1 # # 2 X 4 # # 3 m1, m2, m3, m4 = m_bond dist[m1][m2] = dist[m1][m4] = dist[m2][m1] = dist[m2][m3] = 1.17 dist[m3][m2] = dist[m3][m4] = dist[m4][m1] = dist[m4][m3] = 1.17 dist[m1][m3] = dist[m3][m1] = dist[m2][m4] = dist[m4][m2] = 1.7 # +.05 # cycle forces for r in self.sssr: if len(r) == 6: # 6 # # 1 5 # # 2 4 # # 3 m1, m2, m3, m4, m5, m6 = r dist[m1][m4] = dist[m4][m1] = dist[m2][m5] = dist[m5][m2] = dist[m3][m6] = dist[m6][m3] = 1.7 # +.05 if force: pos = None else: pos = {n: (atom.x or uniform(0, .01), atom.y or uniform(0, .01)) for n, atom in self.atoms()} for n, xy in kamada_kawai_layout(self, dist=dict(dist), pos=pos, scale=scale).items(): atom = self._node[n] atom.x, atom.y = xy self.flush_cache()
python
def calculate2d(self, force=False, scale=1): """ recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms """ dist = {} # length forces for n, m_bond in self._adj.items(): dist[n] = {} for m in m_bond: dist[n][m] = .825 # angle forces for n, m_bond in self._adj.items(): if len(m_bond) == 2: # single-single or single-double bonds has angle = 120, other 180 (m1, b1), (m2, b2) = m_bond.items() dist[m1][m2] = dist[m2][m1] = 1.43 if b1.order + b2.order in (2, 3) else 1.7 # +.05 elif len(m_bond) == 3: m1, m2, m3 = m_bond dist[m1][m2] = dist[m1][m3] = dist[m2][m3] = dist[m3][m2] = dist[m2][m1] = dist[m3][m1] = 1.43 elif len(m_bond) == 4: # 1 # # 2 X 4 # # 3 m1, m2, m3, m4 = m_bond dist[m1][m2] = dist[m1][m4] = dist[m2][m1] = dist[m2][m3] = 1.17 dist[m3][m2] = dist[m3][m4] = dist[m4][m1] = dist[m4][m3] = 1.17 dist[m1][m3] = dist[m3][m1] = dist[m2][m4] = dist[m4][m2] = 1.7 # +.05 # cycle forces for r in self.sssr: if len(r) == 6: # 6 # # 1 5 # # 2 4 # # 3 m1, m2, m3, m4, m5, m6 = r dist[m1][m4] = dist[m4][m1] = dist[m2][m5] = dist[m5][m2] = dist[m3][m6] = dist[m6][m3] = 1.7 # +.05 if force: pos = None else: pos = {n: (atom.x or uniform(0, .01), atom.y or uniform(0, .01)) for n, atom in self.atoms()} for n, xy in kamada_kawai_layout(self, dist=dict(dist), pos=pos, scale=scale).items(): atom = self._node[n] atom.x, atom.y = xy self.flush_cache()
[ "def", "calculate2d", "(", "self", ",", "force", "=", "False", ",", "scale", "=", "1", ")", ":", "dist", "=", "{", "}", "# length forces", "for", "n", ",", "m_bond", "in", "self", ".", "_adj", ".", "items", "(", ")", ":", "dist", "[", "n", "]", "=", "{", "}", "for", "m", "in", "m_bond", ":", "dist", "[", "n", "]", "[", "m", "]", "=", ".825", "# angle forces", "for", "n", ",", "m_bond", "in", "self", ".", "_adj", ".", "items", "(", ")", ":", "if", "len", "(", "m_bond", ")", "==", "2", ":", "# single-single or single-double bonds has angle = 120, other 180", "(", "m1", ",", "b1", ")", ",", "(", "m2", ",", "b2", ")", "=", "m_bond", ".", "items", "(", ")", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "1.43", "if", "b1", ".", "order", "+", "b2", ".", "order", "in", "(", "2", ",", "3", ")", "else", "1.7", "# +.05", "elif", "len", "(", "m_bond", ")", "==", "3", ":", "m1", ",", "m2", ",", "m3", "=", "m_bond", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m1", "]", "[", "m3", "]", "=", "dist", "[", "m2", "]", "[", "m3", "]", "=", "dist", "[", "m3", "]", "[", "m2", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "dist", "[", "m3", "]", "[", "m1", "]", "=", "1.43", "elif", "len", "(", "m_bond", ")", "==", "4", ":", "# 1", "#", "# 2 X 4", "#", "# 3", "m1", ",", "m2", ",", "m3", ",", "m4", "=", "m_bond", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m1", "]", "[", "m4", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m3", "]", "=", "1.17", "dist", "[", "m3", "]", "[", "m2", "]", "=", "dist", "[", "m3", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m1", "]", "=", "dist", "[", "m4", "]", "[", "m3", "]", "=", "1.17", "dist", "[", "m1", "]", "[", "m3", "]", "=", "dist", "[", "m3", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m2", "]", "=", "1.7", "# +.05", "# cycle forces", "for", "r", "in", "self", ".", "sssr", ":", "if", "len", "(", "r", ")", "==", "6", ":", "# 6", "#", "# 1 5", "#", "# 2 4", "#", "# 3", "m1", ",", "m2", ",", "m3", ",", "m4", ",", "m5", ",", "m6", "=", "r", "dist", "[", "m1", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m5", "]", "=", "dist", "[", "m5", "]", "[", "m2", "]", "=", "dist", "[", "m3", "]", "[", "m6", "]", "=", "dist", "[", "m6", "]", "[", "m3", "]", "=", "1.7", "# +.05", "if", "force", ":", "pos", "=", "None", "else", ":", "pos", "=", "{", "n", ":", "(", "atom", ".", "x", "or", "uniform", "(", "0", ",", ".01", ")", ",", "atom", ".", "y", "or", "uniform", "(", "0", ",", ".01", ")", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", "}", "for", "n", ",", "xy", "in", "kamada_kawai_layout", "(", "self", ",", "dist", "=", "dict", "(", "dist", ")", ",", "pos", "=", "pos", ",", "scale", "=", "scale", ")", ".", "items", "(", ")", ":", "atom", "=", "self", ".", "_node", "[", "n", "]", "atom", ".", "x", ",", "atom", ".", "y", "=", "xy", "self", ".", "flush_cache", "(", ")" ]
recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms
[ "recalculate", "2d", "coordinates", ".", "currently", "rings", "can", "be", "calculated", "badly", "." ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/calculate2d.py#L24-L79
train
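A minimal standalone sketch of the distance-driven layout step in the record above, assuming kamada_kawai_layout comes from networkx (the graph and target lengths here are illustrative plain-Python stand-ins, not CGRtools objects):

import networkx as nx

# Toy 4-atom chain; the per-edge target length mirrors the .825 bond force above.
g = nx.Graph([(1, 2), (2, 3), (3, 4)])
dist = {n: {} for n in g}
for n, m in g.edges():
    dist[n][m] = dist[m][n] = .825
# Angle-force analogue: push second neighbors apart, as in the len(m_bond) == 2 branch.
dist[1][3] = dist[3][1] = dist[2][4] = dist[4][2] = 1.43

pos = nx.kamada_kawai_layout(g, dist=dist, scale=1)
for n, (x, y) in pos.items():
    print(n, round(x, 3), round(y, 3))

Pairs missing from dist (like 1-4 here) get a negligible weight in the Kamada-Kawai cost, which is why the record can pass only bond/angle/ring pairs.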
LordDarkula/chess_py
chess_py/pieces/knight.py
Knight.possible_moves
def possible_moves(self, position): """ Finds all possible knight moves :type: position Board :rtype: list """ for direction in [0, 1, 2, 3]: angles = self._rotate_direction_ninety_degrees(direction) for angle in angles: try: end_loc = self.location.shift(angle).shift(direction).shift(direction) if position.is_square_empty(end_loc): status = notation_const.MOVEMENT elif not position.piece_at_square(end_loc).color == self.color: status = notation_const.CAPTURE else: continue yield Move(end_loc=end_loc, piece=self, status=status, start_loc=self.location) except IndexError: pass
python
def possible_moves(self, position): """ Finds all possible knight moves :type: position Board :rtype: list """ for direction in [0, 1, 2, 3]: angles = self._rotate_direction_ninety_degrees(direction) for angle in angles: try: end_loc = self.location.shift(angle).shift(direction).shift(direction) if position.is_square_empty(end_loc): status = notation_const.MOVEMENT elif not position.piece_at_square(end_loc).color == self.color: status = notation_const.CAPTURE else: continue yield Move(end_loc=end_loc, piece=self, status=status, start_loc=self.location) except IndexError: pass
[ "def", "possible_moves", "(", "self", ",", "position", ")", ":", "for", "direction", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "angles", "=", "self", ".", "_rotate_direction_ninety_degrees", "(", "direction", ")", "for", "angle", "in", "angles", ":", "try", ":", "end_loc", "=", "self", ".", "location", ".", "shift", "(", "angle", ")", ".", "shift", "(", "direction", ")", ".", "shift", "(", "direction", ")", "if", "position", ".", "is_square_empty", "(", "end_loc", ")", ":", "status", "=", "notation_const", ".", "MOVEMENT", "elif", "not", "position", ".", "piece_at_square", "(", "end_loc", ")", ".", "color", "==", "self", ".", "color", ":", "status", "=", "notation_const", ".", "CAPTURE", "else", ":", "continue", "yield", "Move", "(", "end_loc", "=", "end_loc", ",", "piece", "=", "self", ",", "status", "=", "status", ",", "start_loc", "=", "self", ".", "location", ")", "except", "IndexError", ":", "pass" ]
Finds all possible knight moves :type: position Board :rtype: list
[ "Finds", "all", "possible", "knight", "moves", ":", "type", ":", "position", "Board", ":", "rtype", ":", "list" ]
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/knight.py#L57-L81
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.centers_list
def centers_list(self): """ get a list of lists of atoms of reaction centers """ center = set() adj = defaultdict(set) for n, atom in self.atoms(): if atom._reactant != atom._product: center.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: adj[n].add(m) adj[m].add(n) center.add(n) center.add(m) out = [] while center: n = center.pop() if n in adj: c = set(self.__plain_bfs(adj, n)) out.append(list(c)) center.difference_update(c) else: out.append([n]) return out
python
def centers_list(self): """ get a list of lists of atoms of reaction centers """ center = set() adj = defaultdict(set) for n, atom in self.atoms(): if atom._reactant != atom._product: center.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: adj[n].add(m) adj[m].add(n) center.add(n) center.add(m) out = [] while center: n = center.pop() if n in adj: c = set(self.__plain_bfs(adj, n)) out.append(list(c)) center.difference_update(c) else: out.append([n]) return out
[ "def", "centers_list", "(", "self", ")", ":", "center", "=", "set", "(", ")", "adj", "=", "defaultdict", "(", "set", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "atom", ".", "_reactant", "!=", "atom", ".", "_product", ":", "center", ".", "add", "(", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", ":", "adj", "[", "n", "]", ".", "add", "(", "m", ")", "adj", "[", "m", "]", ".", "add", "(", "n", ")", "center", ".", "add", "(", "n", ")", "center", ".", "add", "(", "m", ")", "out", "=", "[", "]", "while", "center", ":", "n", "=", "center", ".", "pop", "(", ")", "if", "n", "in", "adj", ":", "c", "=", "set", "(", "self", ".", "__plain_bfs", "(", "adj", ",", "n", ")", ")", "out", ".", "append", "(", "list", "(", "c", ")", ")", "center", ".", "difference_update", "(", "c", ")", "else", ":", "out", ".", "append", "(", "[", "n", "]", ")", "return", "out" ]
get a list of lists of atoms of reaction centers
[ "get", "a", "list", "of", "lists", "of", "atoms", "of", "reaction", "centers" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L37-L63
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.center_atoms
def center_atoms(self): """ get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals). """ nodes = set() for n, atom in self.atoms(): if atom._reactant != atom._product: nodes.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: nodes.add(n) nodes.add(m) return list(nodes)
python
def center_atoms(self): """ get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals). """ nodes = set() for n, atom in self.atoms(): if atom._reactant != atom._product: nodes.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: nodes.add(n) nodes.add(m) return list(nodes)
[ "def", "center_atoms", "(", "self", ")", ":", "nodes", "=", "set", "(", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "atom", ".", "_reactant", "!=", "atom", ".", "_product", ":", "nodes", ".", "add", "(", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", ":", "nodes", ".", "add", "(", "n", ")", "nodes", ".", "add", "(", "m", ")", "return", "list", "(", "nodes", ")" ]
get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals).
[ "get", "list", "of", "atoms", "of", "reaction", "center", "(", "atoms", "with", "dynamic", ":", "bonds", "charges", "radicals", ")", "." ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L66-L79
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.center_bonds
def center_bonds(self): """ get list of bonds of reaction center (bonds with dynamic orders). """ return [(n, m) for n, m, bond in self.bonds() if bond._reactant != bond._product]
python
def center_bonds(self): """ get list of bonds of reaction center (bonds with dynamic orders). """ return [(n, m) for n, m, bond in self.bonds() if bond._reactant != bond._product]
[ "def", "center_bonds", "(", "self", ")", ":", "return", "[", "(", "n", ",", "m", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", "]" ]
get list of bonds of reaction center (bonds with dynamic orders).
[ "get", "list", "of", "bonds", "of", "reaction", "center", "(", "bonds", "with", "dynamic", "orders", ")", "." ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L82-L85
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.reset_query_marks
def reset_query_marks(self): """ set or reset hyb and neighbors marks to atoms. """ for i, atom in self.atoms(): neighbors = 0 hybridization = 1 p_neighbors = 0 p_hybridization = 1 # hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic for j, bond in self._adj[i].items(): isnth = self._node[j].element != 'H' order = bond.order if order: if isnth: neighbors += 1 if hybridization not in (3, 4): if order == 4: hybridization = 4 elif order == 3: hybridization = 3 elif order == 2: if hybridization == 2: hybridization = 3 else: hybridization = 2 order = bond.p_order if order: if isnth: p_neighbors += 1 if p_hybridization not in (3, 4): if order == 4: p_hybridization = 4 elif order == 3: p_hybridization = 3 elif order == 2: if p_hybridization == 2: p_hybridization = 3 else: p_hybridization = 2 atom._reactant._neighbors = neighbors atom._reactant._hybridization = hybridization atom._product._neighbors = p_neighbors atom._product._hybridization = p_hybridization atom.__dict__.clear() # flush cache self.flush_cache()
python
def reset_query_marks(self): """ set or reset hyb and neighbors marks to atoms. """ for i, atom in self.atoms(): neighbors = 0 hybridization = 1 p_neighbors = 0 p_hybridization = 1 # hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic for j, bond in self._adj[i].items(): isnth = self._node[j].element != 'H' order = bond.order if order: if isnth: neighbors += 1 if hybridization not in (3, 4): if order == 4: hybridization = 4 elif order == 3: hybridization = 3 elif order == 2: if hybridization == 2: hybridization = 3 else: hybridization = 2 order = bond.p_order if order: if isnth: p_neighbors += 1 if p_hybridization not in (3, 4): if order == 4: p_hybridization = 4 elif order == 3: p_hybridization = 3 elif order == 2: if p_hybridization == 2: p_hybridization = 3 else: p_hybridization = 2 atom._reactant._neighbors = neighbors atom._reactant._hybridization = hybridization atom._product._neighbors = p_neighbors atom._product._hybridization = p_hybridization atom.__dict__.clear() # flush cache self.flush_cache()
[ "def", "reset_query_marks", "(", "self", ")", ":", "for", "i", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "neighbors", "=", "0", "hybridization", "=", "1", "p_neighbors", "=", "0", "p_hybridization", "=", "1", "# hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic", "for", "j", ",", "bond", "in", "self", ".", "_adj", "[", "i", "]", ".", "items", "(", ")", ":", "isnth", "=", "self", ".", "_node", "[", "j", "]", ".", "element", "!=", "'H'", "order", "=", "bond", ".", "order", "if", "order", ":", "if", "isnth", ":", "neighbors", "+=", "1", "if", "hybridization", "not", "in", "(", "3", ",", "4", ")", ":", "if", "order", "==", "4", ":", "hybridization", "=", "4", "elif", "order", "==", "3", ":", "hybridization", "=", "3", "elif", "order", "==", "2", ":", "if", "hybridization", "==", "2", ":", "hybridization", "=", "3", "else", ":", "hybridization", "=", "2", "order", "=", "bond", ".", "p_order", "if", "order", ":", "if", "isnth", ":", "p_neighbors", "+=", "1", "if", "p_hybridization", "not", "in", "(", "3", ",", "4", ")", ":", "if", "order", "==", "4", ":", "p_hybridization", "=", "4", "elif", "order", "==", "3", ":", "p_hybridization", "=", "3", "elif", "order", "==", "2", ":", "if", "p_hybridization", "==", "2", ":", "p_hybridization", "=", "3", "else", ":", "p_hybridization", "=", "2", "atom", ".", "_reactant", ".", "_neighbors", "=", "neighbors", "atom", ".", "_reactant", ".", "_hybridization", "=", "hybridization", "atom", ".", "_product", ".", "_neighbors", "=", "p_neighbors", "atom", ".", "_product", ".", "_hybridization", "=", "p_hybridization", "atom", ".", "__dict__", ".", "clear", "(", ")", "# flush cache", "self", ".", "flush_cache", "(", ")" ]
set or reset hyb and neighbors marks to atoms.
[ "set", "or", "reset", "hyb", "and", "neighbors", "marks", "to", "atoms", "." ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L87-L134
train
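The hybridization bookkeeping above (1 = sp3, 2 = sp2, 3 = sp, 4 = aromatic) can be isolated into a small standalone helper; this sketch mirrors the branch logic of the record rather than the CGRtools API:

def fold_hybridization(orders):
    """Fold a sequence of bond orders into the 1/2/3/4 hybridization code."""
    hybridization = 1
    for order in orders:
        if hybridization in (3, 4):  # sp and aromatic are terminal states
            break
        if order == 4:
            hybridization = 4
        elif order == 3:
            hybridization = 3
        elif order == 2:
            # a second double bond upgrades sp2 to sp (cumulated double bonds)
            hybridization = 3 if hybridization == 2 else 2
    return hybridization

print(fold_hybridization([1, 1, 1]))  # 1 -> sp3
print(fold_hybridization([2, 1]))     # 2 -> sp2
print(fold_hybridization([2, 2]))     # 3 -> sp
print(fold_hybridization([4, 4]))     # 4 -> aromatic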
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.substructure
def substructure(self, atoms, meta=False, as_view=True): """ create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data """ s = super().substructure(atoms, meta, as_view) if as_view: s.reset_query_marks = frozen return s
python
def substructure(self, atoms, meta=False, as_view=True): """ create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data """ s = super().substructure(atoms, meta, as_view) if as_view: s.reset_query_marks = frozen return s
[ "def", "substructure", "(", "self", ",", "atoms", ",", "meta", "=", "False", ",", "as_view", "=", "True", ")", ":", "s", "=", "super", "(", ")", ".", "substructure", "(", "atoms", ",", "meta", ",", "as_view", ")", "if", "as_view", ":", "s", ".", "reset_query_marks", "=", "frozen", "return", "s" ]
create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data
[ "create", "substructure", "containing", "atoms", "from", "nbunch", "list" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L136-L148
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer._matcher
def _matcher(self, other): """ CGRContainer < CGRContainer """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr-cgr possible')
python
def _matcher(self, other): """ CGRContainer < CGRContainer """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr-cgr possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only cgr-cgr possible'", ")" ]
CGRContainer < CGRContainer
[ "CGRContainer", "<", "CGRContainer" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L150-L156
train
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.__plain_bfs
def __plain_bfs(adj, source): """modified NX fast BFS node generator""" seen = set() nextlevel = {source} while nextlevel: thislevel = nextlevel nextlevel = set() for v in thislevel: if v not in seen: yield v seen.add(v) nextlevel.update(adj[v])
python
def __plain_bfs(adj, source): """modified NX fast BFS node generator""" seen = set() nextlevel = {source} while nextlevel: thislevel = nextlevel nextlevel = set() for v in thislevel: if v not in seen: yield v seen.add(v) nextlevel.update(adj[v])
[ "def", "__plain_bfs", "(", "adj", ",", "source", ")", ":", "seen", "=", "set", "(", ")", "nextlevel", "=", "{", "source", "}", "while", "nextlevel", ":", "thislevel", "=", "nextlevel", "nextlevel", "=", "set", "(", ")", "for", "v", "in", "thislevel", ":", "if", "v", "not", "in", "seen", ":", "yield", "v", "seen", ".", "add", "(", "v", ")", "nextlevel", ".", "update", "(", "adj", "[", "v", "]", ")" ]
modified NX fast BFS node generator
[ "modified", "NX", "fast", "BFS", "node", "generator" ]
15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L159-L170
train
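Because the generator above only needs a mapping of node -> neighbor set, it can be exercised with a plain dict; a minimal sketch reusing the record's logic outside the class:

def plain_bfs(adj, source):
    # modified NX fast BFS node generator, as in the record above
    seen = set()
    nextlevel = {source}
    while nextlevel:
        thislevel = nextlevel
        nextlevel = set()
        for v in thislevel:
            if v not in seen:
                yield v
                seen.add(v)
                nextlevel.update(adj[v])

adj = {1: {2}, 2: {1, 3}, 3: {2}, 4: set()}
print(list(plain_bfs(adj, 1)))  # [1, 2, 3] -- the component containing node 1

This is what centers_list in the earlier record relies on to group reaction-center atoms into connected components.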
cocaine/cocaine-framework-python
cocaine/detail/defaults.py
DefaultOptions.token
def token(self): """ Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body. """ if self._token is None: token_type = os.getenv(TOKEN_TYPE_KEY, '') token_body = os.getenv(TOKEN_BODY_KEY, '') self._token = _Token(token_type, token_body) return self._token
python
def token(self): """ Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body. """ if self._token is None: token_type = os.getenv(TOKEN_TYPE_KEY, '') token_body = os.getenv(TOKEN_BODY_KEY, '') self._token = _Token(token_type, token_body) return self._token
[ "def", "token", "(", "self", ")", ":", "if", "self", ".", "_token", "is", "None", ":", "token_type", "=", "os", ".", "getenv", "(", "TOKEN_TYPE_KEY", ",", "''", ")", "token_body", "=", "os", ".", "getenv", "(", "TOKEN_BODY_KEY", ",", "''", ")", "self", ".", "_token", "=", "_Token", "(", "token_type", ",", "token_body", ")", "return", "self", ".", "_token" ]
Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body.
[ "Returns", "authorization", "token", "provided", "by", "Cocaine", "." ]
d8a30074b6338bac4389eb996e00d404338115e4
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/defaults.py#L116-L129
train
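A standalone sketch of the same lazy, environment-driven lookup; the variable names and the _Token field names here are placeholders for illustration, since the framework's real TOKEN_TYPE_KEY and TOKEN_BODY_KEY values are defined elsewhere:

import os
from collections import namedtuple

_Token = namedtuple('Token', ['ty', 'body'])  # field names assumed for illustration

# Placeholder environment variable names -- not the framework's real keys.
TOKEN_TYPE_KEY = 'MY_APP_TOKEN_TYPE'
TOKEN_BODY_KEY = 'MY_APP_TOKEN_BODY'

os.environ[TOKEN_TYPE_KEY] = 'bearer'
os.environ[TOKEN_BODY_KEY] = 'secret-token-body'

token = _Token(os.getenv(TOKEN_TYPE_KEY, ''), os.getenv(TOKEN_BODY_KEY, ''))
print(token)  # Token(ty='bearer', body='secret-token-body')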
cocaine/cocaine-framework-python
cocaine/detail/logger.py
Logger._send
def _send(self): """ Send a message lazy formatted with args. External log attributes can be passed via named attribute `extra`, like in logging from the standart library. Note: * Attrs must be dict, otherwise the whole message would be skipped. * The key field in an attr is converted to string. * The value is sent as is if isinstance of (str, unicode, int, float, long, bool), otherwise we convert the value to string. """ buff = BytesIO() while True: msgs = list() try: msg = yield self.queue.get() # we need to connect first, as we issue verbosity request just after connection # and channels should strictly go in ascending order if not self._connected: yield self.connect() try: while True: msgs.append(msg) counter = next(self.counter) msgpack_pack([counter, EMIT, msg], buff) msg = self.queue.get_nowait() except queues.QueueEmpty: pass try: yield self.pipe.write(buff.getvalue()) except Exception: pass # clean the buffer or we will end up without memory buff.truncate(0) except Exception: for message in msgs: self._log_to_fallback(message)
python
def _send(self): """ Send a message lazy formatted with args. External log attributes can be passed via named attribute `extra`, like in logging from the standart library. Note: * Attrs must be dict, otherwise the whole message would be skipped. * The key field in an attr is converted to string. * The value is sent as is if isinstance of (str, unicode, int, float, long, bool), otherwise we convert the value to string. """ buff = BytesIO() while True: msgs = list() try: msg = yield self.queue.get() # we need to connect first, as we issue verbosity request just after connection # and channels should strictly go in ascending order if not self._connected: yield self.connect() try: while True: msgs.append(msg) counter = next(self.counter) msgpack_pack([counter, EMIT, msg], buff) msg = self.queue.get_nowait() except queues.QueueEmpty: pass try: yield self.pipe.write(buff.getvalue()) except Exception: pass # clean the buffer or we will end up without memory buff.truncate(0) except Exception: for message in msgs: self._log_to_fallback(message)
[ "def", "_send", "(", "self", ")", ":", "buff", "=", "BytesIO", "(", ")", "while", "True", ":", "msgs", "=", "list", "(", ")", "try", ":", "msg", "=", "yield", "self", ".", "queue", ".", "get", "(", ")", "# we need to connect first, as we issue verbosity request just after connection", "# and channels should strictly go in ascending order", "if", "not", "self", ".", "_connected", ":", "yield", "self", ".", "connect", "(", ")", "try", ":", "while", "True", ":", "msgs", ".", "append", "(", "msg", ")", "counter", "=", "next", "(", "self", ".", "counter", ")", "msgpack_pack", "(", "[", "counter", ",", "EMIT", ",", "msg", "]", ",", "buff", ")", "msg", "=", "self", ".", "queue", ".", "get_nowait", "(", ")", "except", "queues", ".", "QueueEmpty", ":", "pass", "try", ":", "yield", "self", ".", "pipe", ".", "write", "(", "buff", ".", "getvalue", "(", ")", ")", "except", "Exception", ":", "pass", "# clean the buffer or we will end up without memory", "buff", ".", "truncate", "(", "0", ")", "except", "Exception", ":", "for", "message", "in", "msgs", ":", "self", ".", "_log_to_fallback", "(", "message", ")" ]
Send a message lazy formatted with args. External log attributes can be passed via named attribute `extra`, like in logging from the standart library. Note: * Attrs must be dict, otherwise the whole message would be skipped. * The key field in an attr is converted to string. * The value is sent as is if isinstance of (str, unicode, int, float, long, bool), otherwise we convert the value to string.
[ "Send", "a", "message", "lazy", "formatted", "with", "args", ".", "External", "log", "attributes", "can", "be", "passed", "via", "named", "attribute", "extra", "like", "in", "logging", "from", "the", "standart", "library", "." ]
d8a30074b6338bac4389eb996e00d404338115e4
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/logger.py#L147-L186
train
LordDarkula/chess_py
chess_py/pieces/rook.py
Rook.moves_in_direction
def moves_in_direction(self, direction, position): """ Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list """ current_square = self.location while True: try: current_square = direction(current_square) except IndexError: return if self.contains_opposite_color_piece(current_square, position): yield self.create_move(current_square, notation_const.CAPTURE) if not position.is_square_empty(current_square): return yield self.create_move(current_square, notation_const.MOVEMENT)
python
def moves_in_direction(self, direction, position): """ Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list """ current_square = self.location while True: try: current_square = direction(current_square) except IndexError: return if self.contains_opposite_color_piece(current_square, position): yield self.create_move(current_square, notation_const.CAPTURE) if not position.is_square_empty(current_square): return yield self.create_move(current_square, notation_const.MOVEMENT)
[ "def", "moves_in_direction", "(", "self", ",", "direction", ",", "position", ")", ":", "current_square", "=", "self", ".", "location", "while", "True", ":", "try", ":", "current_square", "=", "direction", "(", "current_square", ")", "except", "IndexError", ":", "return", "if", "self", ".", "contains_opposite_color_piece", "(", "current_square", ",", "position", ")", ":", "yield", "self", ".", "create_move", "(", "current_square", ",", "notation_const", ".", "CAPTURE", ")", "if", "not", "position", ".", "is_square_empty", "(", "current_square", ")", ":", "return", "yield", "self", ".", "create_move", "(", "current_square", ",", "notation_const", ".", "MOVEMENT", ")" ]
Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list
[ "Finds", "moves", "in", "a", "given", "direction" ]
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/rook.py#L48-L70
train
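The ray-walking pattern above (advance until the board edge raises IndexError, stop at the first occupied square) works with any step function; a toy sketch on a bare 8x8 board, independent of chess_py's own classes:

def ray(start, step, occupied):
    """Yield squares along one direction, stopping at the board edge or a piece."""
    square = start
    while True:
        square = step(square)
        f, r = square
        if not (0 <= f < 8 and 0 <= r < 8):
            return  # stand-in for the IndexError raised by Location.shift
        yield square
        if square in occupied:
            return  # the capture square is yielded, then the ray stops

up = lambda sq: (sq[0], sq[1] + 1)
print(list(ray((0, 0), up, occupied={(0, 5)})))
# [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] -- blocked at the piece on (0, 5)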
LordDarkula/chess_py
chess_py/pieces/rook.py
Rook.possible_moves
def possible_moves(self, position): """ Returns all possible rook moves. :type: position: Board :rtype: list """ for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]): yield move
python
def possible_moves(self, position): """ Returns all possible rook moves. :type: position: Board :rtype: list """ for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]): yield move
[ "def", "possible_moves", "(", "self", ",", "position", ")", ":", "for", "move", "in", "itertools", ".", "chain", "(", "*", "[", "self", ".", "moves_in_direction", "(", "fn", ",", "position", ")", "for", "fn", "in", "self", ".", "cross_fn", "]", ")", ":", "yield", "move" ]
Returns all possible rook moves. :type: position: Board :rtype: list
[ "Returns", "all", "possible", "rook", "moves", "." ]
14bebc2f8c49ae25c59375cc83d0b38d8ff7281d
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/rook.py#L72-L80
train
spacetelescope/synphot_refactor
synphot/utils.py
overlap_status
def overlap_status(a, b): """Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b`` """ # Get the endpoints a1, a2 = a.min(), a.max() b1, b2 = b.min(), b.max() # Do the comparison if a1 >= b1 and a2 <= b2: result = 'full' elif a2 < b1 or b2 < a1: result = 'none' else: result = 'partial' return result
python
def overlap_status(a, b): """Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b`` """ # Get the endpoints a1, a2 = a.min(), a.max() b1, b2 = b.min(), b.max() # Do the comparison if a1 >= b1 and a2 <= b2: result = 'full' elif a2 < b1 or b2 < a1: result = 'none' else: result = 'partial' return result
[ "def", "overlap_status", "(", "a", ",", "b", ")", ":", "# Get the endpoints", "a1", ",", "a2", "=", "a", ".", "min", "(", ")", ",", "a", ".", "max", "(", ")", "b1", ",", "b2", "=", "b", ".", "min", "(", ")", ",", "b", ".", "max", "(", ")", "# Do the comparison", "if", "a1", ">=", "b1", "and", "a2", "<=", "b2", ":", "result", "=", "'full'", "elif", "a2", "<", "b1", "or", "b2", "<", "a1", ":", "result", "=", "'none'", "else", ":", "result", "=", "'partial'", "return", "result" ]
Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b``
[ "Check", "overlap", "between", "two", "arrays", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L23-L51
train
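Assuming synphot is installed, the three return values can be demonstrated directly with plain numpy arrays (the function only looks at min/max):

import numpy as np
from synphot.utils import overlap_status

b = np.arange(1000, 2000)
print(overlap_status(np.arange(1200, 1500), b))  # 'full' -- a lies inside b
print(overlap_status(np.arange(1500, 2500), b))  # 'partial' -- a extends past b
print(overlap_status(np.arange(3000, 3500), b))  # 'none' -- disjoint ranges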
spacetelescope/synphot_refactor
synphot/utils.py
validate_totalflux
def validate_totalflux(totalflux): """Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number. """ if totalflux <= 0.0: raise exceptions.SynphotError('Integrated flux is <= 0') elif np.isnan(totalflux): raise exceptions.SynphotError('Integrated flux is NaN') elif np.isinf(totalflux): raise exceptions.SynphotError('Integrated flux is infinite')
python
def validate_totalflux(totalflux): """Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number. """ if totalflux <= 0.0: raise exceptions.SynphotError('Integrated flux is <= 0') elif np.isnan(totalflux): raise exceptions.SynphotError('Integrated flux is NaN') elif np.isinf(totalflux): raise exceptions.SynphotError('Integrated flux is infinite')
[ "def", "validate_totalflux", "(", "totalflux", ")", ":", "if", "totalflux", "<=", "0.0", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is <= 0'", ")", "elif", "np", ".", "isnan", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is NaN'", ")", "elif", "np", ".", "isinf", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is infinite'", ")" ]
Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number.
[ "Check", "integrated", "flux", "for", "invalid", "values", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L54-L73
train
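Assuming synphot is installed, invalid fluxes raise SynphotError while a valid one passes silently; a quick check:

import numpy as np
from synphot.exceptions import SynphotError
from synphot.utils import validate_totalflux

validate_totalflux(1.5e-13)  # valid: returns None, no exception

for bad in (0.0, -1.0, np.nan, np.inf):
    try:
        validate_totalflux(bad)
    except SynphotError as exc:
        print(bad, '->', exc)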
spacetelescope/synphot_refactor
synphot/utils.py
validate_wavelengths
def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """ if isinstance(wavelengths, u.Quantity): units.validate_wave_unit(wavelengths.unit) wave = wavelengths.value else: wave = wavelengths if np.isscalar(wave): wave = [wave] wave = np.asarray(wave) # Check for zeroes if np.any(wave <= 0): raise exceptions.ZeroWavelength( 'Negative or zero wavelength occurs in wavelength array', rows=np.where(wave <= 0)[0]) # Check for monotonicity sorted_wave = np.sort(wave) if not np.alltrue(sorted_wave == wave): if np.alltrue(sorted_wave[::-1] == wave): pass # Monotonic descending is allowed else: raise exceptions.UnsortedWavelength( 'Wavelength array is not monotonic', rows=np.where(sorted_wave != wave)[0]) # Check for duplicate values if wave.size > 1: dw = sorted_wave[1:] - sorted_wave[:-1] if np.any(dw == 0): raise exceptions.DuplicateWavelength( 'Wavelength array contains duplicate entries', rows=np.where(dw == 0)[0])
python
def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """ if isinstance(wavelengths, u.Quantity): units.validate_wave_unit(wavelengths.unit) wave = wavelengths.value else: wave = wavelengths if np.isscalar(wave): wave = [wave] wave = np.asarray(wave) # Check for zeroes if np.any(wave <= 0): raise exceptions.ZeroWavelength( 'Negative or zero wavelength occurs in wavelength array', rows=np.where(wave <= 0)[0]) # Check for monotonicity sorted_wave = np.sort(wave) if not np.alltrue(sorted_wave == wave): if np.alltrue(sorted_wave[::-1] == wave): pass # Monotonic descending is allowed else: raise exceptions.UnsortedWavelength( 'Wavelength array is not monotonic', rows=np.where(sorted_wave != wave)[0]) # Check for duplicate values if wave.size > 1: dw = sorted_wave[1:] - sorted_wave[:-1] if np.any(dw == 0): raise exceptions.DuplicateWavelength( 'Wavelength array contains duplicate entries', rows=np.where(dw == 0)[0])
[ "def", "validate_wavelengths", "(", "wavelengths", ")", ":", "if", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "units", ".", "validate_wave_unit", "(", "wavelengths", ".", "unit", ")", "wave", "=", "wavelengths", ".", "value", "else", ":", "wave", "=", "wavelengths", "if", "np", ".", "isscalar", "(", "wave", ")", ":", "wave", "=", "[", "wave", "]", "wave", "=", "np", ".", "asarray", "(", "wave", ")", "# Check for zeroes", "if", "np", ".", "any", "(", "wave", "<=", "0", ")", ":", "raise", "exceptions", ".", "ZeroWavelength", "(", "'Negative or zero wavelength occurs in wavelength array'", ",", "rows", "=", "np", ".", "where", "(", "wave", "<=", "0", ")", "[", "0", "]", ")", "# Check for monotonicity", "sorted_wave", "=", "np", ".", "sort", "(", "wave", ")", "if", "not", "np", ".", "alltrue", "(", "sorted_wave", "==", "wave", ")", ":", "if", "np", ".", "alltrue", "(", "sorted_wave", "[", ":", ":", "-", "1", "]", "==", "wave", ")", ":", "pass", "# Monotonic descending is allowed", "else", ":", "raise", "exceptions", ".", "UnsortedWavelength", "(", "'Wavelength array is not monotonic'", ",", "rows", "=", "np", ".", "where", "(", "sorted_wave", "!=", "wave", ")", "[", "0", "]", ")", "# Check for duplicate values", "if", "wave", ".", "size", ">", "1", ":", "dw", "=", "sorted_wave", "[", "1", ":", "]", "-", "sorted_wave", "[", ":", "-", "1", "]", "if", "np", ".", "any", "(", "dw", "==", "0", ")", ":", "raise", "exceptions", ".", "DuplicateWavelength", "(", "'Wavelength array contains duplicate entries'", ",", "rows", "=", "np", ".", "where", "(", "dw", "==", "0", ")", "[", "0", "]", ")" ]
Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array.
[ "Check", "wavelengths", "for", "synphot", "compatibility", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L76-L139
train
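The failure modes map to the distinct exception classes named in the Raises section; a sketch assuming synphot is installed:

import numpy as np
from synphot import exceptions, utils

utils.validate_wavelengths(np.array([1000.0, 2000.0, 3000.0]))  # ascending: OK
utils.validate_wavelengths(np.array([3000.0, 2000.0, 1000.0]))  # descending: OK

try:
    utils.validate_wavelengths(np.array([1000.0, 3000.0, 2000.0]))
except exceptions.UnsortedWavelength as exc:
    print('unsorted:', exc)

try:
    utils.validate_wavelengths(np.array([0.0, 1000.0]))
except exceptions.ZeroWavelength as exc:
    print('zero:', exc)

try:
    utils.validate_wavelengths(np.array([1000.0, 1000.0, 2000.0]))
except exceptions.DuplicateWavelength as exc:
    print('duplicate:', exc)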
spacetelescope/synphot_refactor
synphot/utils.py
generate_wavelengths
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None, log=True, wave_unit=u.AA): """Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result. """ wave_unit = units.validate_unit(wave_unit) if delta is not None: num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format( minwave, maxwave, num, delta, log) # Log space if log: logmin = np.log10(minwave) logmax = np.log10(maxwave) if delta is None: waveset = np.logspace(logmin, logmax, num, endpoint=False) else: waveset = 10 ** np.arange(logmin, logmax, delta) # Linear space else: if delta is None: waveset = np.linspace(minwave, maxwave, num, endpoint=False) else: waveset = np.arange(minwave, maxwave, delta) return waveset.astype(np.float64) * wave_unit, waveset_str
python
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None, log=True, wave_unit=u.AA): """Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result. """ wave_unit = units.validate_unit(wave_unit) if delta is not None: num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format( minwave, maxwave, num, delta, log) # Log space if log: logmin = np.log10(minwave) logmax = np.log10(maxwave) if delta is None: waveset = np.logspace(logmin, logmax, num, endpoint=False) else: waveset = 10 ** np.arange(logmin, logmax, delta) # Linear space else: if delta is None: waveset = np.linspace(minwave, maxwave, num, endpoint=False) else: waveset = np.arange(minwave, maxwave, delta) return waveset.astype(np.float64) * wave_unit, waveset_str
[ "def", "generate_wavelengths", "(", "minwave", "=", "500", ",", "maxwave", "=", "26000", ",", "num", "=", "10000", ",", "delta", "=", "None", ",", "log", "=", "True", ",", "wave_unit", "=", "u", ".", "AA", ")", ":", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "if", "delta", "is", "not", "None", ":", "num", "=", "None", "waveset_str", "=", "'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'", ".", "format", "(", "minwave", ",", "maxwave", ",", "num", ",", "delta", ",", "log", ")", "# Log space", "if", "log", ":", "logmin", "=", "np", ".", "log10", "(", "minwave", ")", "logmax", "=", "np", ".", "log10", "(", "maxwave", ")", "if", "delta", "is", "None", ":", "waveset", "=", "np", ".", "logspace", "(", "logmin", ",", "logmax", ",", "num", ",", "endpoint", "=", "False", ")", "else", ":", "waveset", "=", "10", "**", "np", ".", "arange", "(", "logmin", ",", "logmax", ",", "delta", ")", "# Linear space", "else", ":", "if", "delta", "is", "None", ":", "waveset", "=", "np", ".", "linspace", "(", "minwave", ",", "maxwave", ",", "num", ",", "endpoint", "=", "False", ")", "else", ":", "waveset", "=", "np", ".", "arange", "(", "minwave", ",", "maxwave", ",", "delta", ")", "return", "waveset", ".", "astype", "(", "np", ".", "float64", ")", "*", "wave_unit", ",", "waveset_str" ]
Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result.
[ "Generate", "wavelength", "array", "to", "be", "used", "for", "spectrum", "sampling", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L142-L205
train
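A quick call with near-default arguments, assuming synphot is installed; note the half-open interval (endpoint=False / arange), so maxwave itself is excluded:

from synphot.utils import generate_wavelengths

waves, info = generate_wavelengths(minwave=500, maxwave=26000, num=10, log=True)
print(info)   # Min: 500, Max: 26000, Num: 10, Delta: None, Log: True
print(waves)  # 10 log-spaced points from 500 Angstrom up to (but excluding) 26000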
spacetelescope/synphot_refactor
synphot/utils.py
merge_wavelengths
def merge_wavelengths(waveset1, waveset2, threshold=1e-12): """Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined. """ if waveset1 is None and waveset2 is None: out_wavelengths = None elif waveset1 is not None and waveset2 is None: out_wavelengths = waveset1 elif waveset1 is None and waveset2 is not None: out_wavelengths = waveset2 else: out_wavelengths = np.union1d(waveset1, waveset2) delta = out_wavelengths[1:] - out_wavelengths[:-1] i_good = np.where(delta > threshold) # Remove "too close together" duplicates if len(i_good[0]) < delta.size: out_wavelengths = np.append( out_wavelengths[i_good], out_wavelengths[-1]) return out_wavelengths
python
def merge_wavelengths(waveset1, waveset2, threshold=1e-12): """Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined. """ if waveset1 is None and waveset2 is None: out_wavelengths = None elif waveset1 is not None and waveset2 is None: out_wavelengths = waveset1 elif waveset1 is None and waveset2 is not None: out_wavelengths = waveset2 else: out_wavelengths = np.union1d(waveset1, waveset2) delta = out_wavelengths[1:] - out_wavelengths[:-1] i_good = np.where(delta > threshold) # Remove "too close together" duplicates if len(i_good[0]) < delta.size: out_wavelengths = np.append( out_wavelengths[i_good], out_wavelengths[-1]) return out_wavelengths
[ "def", "merge_wavelengths", "(", "waveset1", ",", "waveset2", ",", "threshold", "=", "1e-12", ")", ":", "if", "waveset1", "is", "None", "and", "waveset2", "is", "None", ":", "out_wavelengths", "=", "None", "elif", "waveset1", "is", "not", "None", "and", "waveset2", "is", "None", ":", "out_wavelengths", "=", "waveset1", "elif", "waveset1", "is", "None", "and", "waveset2", "is", "not", "None", ":", "out_wavelengths", "=", "waveset2", "else", ":", "out_wavelengths", "=", "np", ".", "union1d", "(", "waveset1", ",", "waveset2", ")", "delta", "=", "out_wavelengths", "[", "1", ":", "]", "-", "out_wavelengths", "[", ":", "-", "1", "]", "i_good", "=", "np", ".", "where", "(", "delta", ">", "threshold", ")", "# Remove \"too close together\" duplicates", "if", "len", "(", "i_good", "[", "0", "]", ")", "<", "delta", ".", "size", ":", "out_wavelengths", "=", "np", ".", "append", "(", "out_wavelengths", "[", "i_good", "]", ",", "out_wavelengths", "[", "-", "1", "]", ")", "return", "out_wavelengths" ]
Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined.
[ "Return", "the", "union", "of", "the", "two", "sets", "of", "wavelengths", "using", ":", "func", ":", "numpy", ".", "union1d", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L208-L252
train
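The union plus near-duplicate pruning can be seen with plain arrays; assuming synphot is installed:

import numpy as np
from synphot.utils import merge_wavelengths

w1 = np.array([1.0, 2.0, 3.0])
w2 = np.array([2.0 + 1e-14, 4.0])
merged = merge_wavelengths(w1, w2)
print(merged)  # [1. 2. 3. 4.] -- the too-close pair collapses (the lower value, 2.0, is dropped)

print(merge_wavelengths(w1, None))  # w1 returned unchanged when one side is None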
spacetelescope/synphot_refactor
synphot/utils.py
download_data
def download_data(cdbs_root, verbose=True, dry_run=False): """Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files. """ from .config import conf # Avoid potential circular import if not os.path.exists(cdbs_root): os.makedirs(cdbs_root, exist_ok=True) if verbose: # pragma: no cover print('Created {}'.format(cdbs_root)) elif not os.path.isdir(cdbs_root): raise OSError('{} must be a directory'.format(cdbs_root)) host = 'http://ssb.stsci.edu/cdbs/' file_list = [] if not cdbs_root.endswith(os.sep): cdbs_root += os.sep # See https://github.com/astropy/astropy/issues/8524 for cfgitem in conf.__class__.__dict__.values(): if (not isinstance(cfgitem, ConfigItem) or not cfgitem.name.endswith('file')): continue url = cfgitem() if not url.startswith(host): if verbose: # pragma: no cover print('{} is not from {}, skipping download'.format( url, host)) continue dst = url.replace(host, cdbs_root).replace('/', os.sep) if os.path.exists(dst): if verbose: # pragma: no cover print('{} already exists, skipping download'.format(dst)) continue # Create sub-directories, if needed. subdirs = os.path.dirname(dst) os.makedirs(subdirs, exist_ok=True) if not dry_run: # pragma: no cover try: src = download_file(url) copyfile(src, dst) except Exception as exc: print('Download failed - {}'.format(str(exc))) continue file_list.append(dst) if verbose: # pragma: no cover print('{} downloaded to {}'.format(url, dst)) return file_list
python
def download_data(cdbs_root, verbose=True, dry_run=False): """Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files. """ from .config import conf # Avoid potential circular import if not os.path.exists(cdbs_root): os.makedirs(cdbs_root, exist_ok=True) if verbose: # pragma: no cover print('Created {}'.format(cdbs_root)) elif not os.path.isdir(cdbs_root): raise OSError('{} must be a directory'.format(cdbs_root)) host = 'http://ssb.stsci.edu/cdbs/' file_list = [] if not cdbs_root.endswith(os.sep): cdbs_root += os.sep # See https://github.com/astropy/astropy/issues/8524 for cfgitem in conf.__class__.__dict__.values(): if (not isinstance(cfgitem, ConfigItem) or not cfgitem.name.endswith('file')): continue url = cfgitem() if not url.startswith(host): if verbose: # pragma: no cover print('{} is not from {}, skipping download'.format( url, host)) continue dst = url.replace(host, cdbs_root).replace('/', os.sep) if os.path.exists(dst): if verbose: # pragma: no cover print('{} already exists, skipping download'.format(dst)) continue # Create sub-directories, if needed. subdirs = os.path.dirname(dst) os.makedirs(subdirs, exist_ok=True) if not dry_run: # pragma: no cover try: src = download_file(url) copyfile(src, dst) except Exception as exc: print('Download failed - {}'.format(str(exc))) continue file_list.append(dst) if verbose: # pragma: no cover print('{} downloaded to {}'.format(url, dst)) return file_list
[ "def", "download_data", "(", "cdbs_root", ",", "verbose", "=", "True", ",", "dry_run", "=", "False", ")", ":", "from", ".", "config", "import", "conf", "# Avoid potential circular import", "if", "not", "os", ".", "path", ".", "exists", "(", "cdbs_root", ")", ":", "os", ".", "makedirs", "(", "cdbs_root", ",", "exist_ok", "=", "True", ")", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'Created {}'", ".", "format", "(", "cdbs_root", ")", ")", "elif", "not", "os", ".", "path", ".", "isdir", "(", "cdbs_root", ")", ":", "raise", "OSError", "(", "'{} must be a directory'", ".", "format", "(", "cdbs_root", ")", ")", "host", "=", "'http://ssb.stsci.edu/cdbs/'", "file_list", "=", "[", "]", "if", "not", "cdbs_root", ".", "endswith", "(", "os", ".", "sep", ")", ":", "cdbs_root", "+=", "os", ".", "sep", "# See https://github.com/astropy/astropy/issues/8524", "for", "cfgitem", "in", "conf", ".", "__class__", ".", "__dict__", ".", "values", "(", ")", ":", "if", "(", "not", "isinstance", "(", "cfgitem", ",", "ConfigItem", ")", "or", "not", "cfgitem", ".", "name", ".", "endswith", "(", "'file'", ")", ")", ":", "continue", "url", "=", "cfgitem", "(", ")", "if", "not", "url", ".", "startswith", "(", "host", ")", ":", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} is not from {}, skipping download'", ".", "format", "(", "url", ",", "host", ")", ")", "continue", "dst", "=", "url", ".", "replace", "(", "host", ",", "cdbs_root", ")", ".", "replace", "(", "'/'", ",", "os", ".", "sep", ")", "if", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} already exists, skipping download'", ".", "format", "(", "dst", ")", ")", "continue", "# Create sub-directories, if needed.", "subdirs", "=", "os", ".", "path", ".", "dirname", "(", "dst", ")", "os", ".", "makedirs", "(", "subdirs", ",", "exist_ok", "=", "True", ")", "if", "not", "dry_run", ":", "# pragma: no cover", "try", ":", "src", "=", "download_file", "(", "url", ")", "copyfile", "(", "src", ",", "dst", ")", "except", "Exception", "as", "exc", ":", "print", "(", "'Download failed - {}'", ".", "format", "(", "str", "(", "exc", ")", ")", ")", "continue", "file_list", ".", "append", "(", "dst", ")", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} downloaded to {}'", ".", "format", "(", "url", ",", "dst", ")", ")", "return", "file_list" ]
Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files.
[ "Download", "CDBS", "data", "files", "to", "given", "root", "directory", ".", "Download", "is", "skipped", "if", "a", "data", "file", "already", "exists", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L255-L336
train
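A hedged usage sketch for the download_data entry above; the import path is inferred from the record's URL (synphot/utils.py) and the target directory is a hypothetical example:

from synphot.utils import download_data  # path assumed from the record's URL

# dry_run=True walks the same logic without any network calls and returns
# the list of files that *would have been* downloaded:
would_download = download_data('/tmp/cdbs', verbose=False, dry_run=True)
for dst in would_download:
    print('would fetch:', dst)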
Julius2342/pyvlx
examples/demo.py
main
async def main(loop):
    """Demonstrate functionality of PyVLX."""
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    # Alternative:
    # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop)

    # Running scenes:
    await pyvlx.load_scenes()
    await pyvlx.scenes["All Windows Closed"].run()

    # Changing position of windows:
    await pyvlx.load_nodes()
    await pyvlx.nodes['Bath'].open()
    await pyvlx.nodes['Bath'].close()
    await pyvlx.nodes['Bath'].set_position(Position(position_percent=45))

    # Switching on-off switches:
    # await pyvlx.nodes['CoffeeMaker'].set_on()
    # await pyvlx.nodes['CoffeeMaker'].set_off()

    # You can easily rename nodes:
    # await pyvlx.nodes["Window 10"].rename("Window 11")

    await pyvlx.disconnect()
python
async def main(loop):
    """Demonstrate functionality of PyVLX."""
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    # Alternative:
    # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop)

    # Running scenes:
    await pyvlx.load_scenes()
    await pyvlx.scenes["All Windows Closed"].run()

    # Changing position of windows:
    await pyvlx.load_nodes()
    await pyvlx.nodes['Bath'].open()
    await pyvlx.nodes['Bath'].close()
    await pyvlx.nodes['Bath'].set_position(Position(position_percent=45))

    # Switching on-off switches:
    # await pyvlx.nodes['CoffeeMaker'].set_on()
    # await pyvlx.nodes['CoffeeMaker'].set_off()

    # You can easily rename nodes:
    # await pyvlx.nodes["Window 10"].rename("Window 11")

    await pyvlx.disconnect()
[ "async", "def", "main", "(", "loop", ")", ":", "pyvlx", "=", "PyVLX", "(", "'pyvlx.yaml'", ",", "loop", "=", "loop", ")", "# Alternative:", "# pyvlx = PyVLX(host=\"192.168.2.127\", password=\"velux123\", loop=loop)", "# Running scenes:", "await", "pyvlx", ".", "load_scenes", "(", ")", "await", "pyvlx", ".", "scenes", "[", "\"All Windows Closed\"", "]", ".", "run", "(", ")", "# Changing position of windows:", "await", "pyvlx", ".", "load_nodes", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "open", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "close", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "set_position", "(", "Position", "(", "position_percent", "=", "45", ")", ")", "# Switching on-off switches:", "# await pyvlx.nodes['CoffeeMaker'].set_on()", "# await pyvlx.nodes['CoffeeMaker'].set_off()", "# You can easily rename nodes:", "# await pyvlx.nodes[\"Window 10\"].rename(\"Window 11\")", "await", "pyvlx", ".", "disconnect", "(", ")" ]
Demonstrate functionality of PyVLX.
[ "Demonstrate", "functionality", "of", "PyVLX", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/examples/demo.py#L7-L30
train
Julius2342/pyvlx
pyvlx/frames/frame_password_enter.py
FramePasswordEnterRequest.get_payload
def get_payload(self): """Return Payload.""" if self.password is None: raise PyVLXException("password is none") if len(self.password) > self.MAX_SIZE: raise PyVLXException("password is too long") return string_to_bytes(self.password, self.MAX_SIZE)
python
def get_payload(self): """Return Payload.""" if self.password is None: raise PyVLXException("password is none") if len(self.password) > self.MAX_SIZE: raise PyVLXException("password is too long") return string_to_bytes(self.password, self.MAX_SIZE)
[ "def", "get_payload", "(", "self", ")", ":", "if", "self", ".", "password", "is", "None", ":", "raise", "PyVLXException", "(", "\"password is none\"", ")", "if", "len", "(", "self", ".", "password", ")", ">", "self", ".", "MAX_SIZE", ":", "raise", "PyVLXException", "(", "\"password is too long\"", ")", "return", "string_to_bytes", "(", "self", ".", "password", ",", "self", ".", "MAX_SIZE", ")" ]
Return Payload.
[ "Return", "Payload", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_password_enter.py#L22-L28
train
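The fixed-width payload built above can be reproduced with plain bytes. MAX_SIZE is assumed to be 32 here (the record does not state it), and the password reuses the placeholder credential from the demo record:

password = 'velux123'                                   # placeholder value
payload = password.encode('utf-8').ljust(32, b'\x00')   # assumed MAX_SIZE = 32
assert len(payload) == 32 and payload.startswith(b'velux123')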
Julius2342/pyvlx
pyvlx/nodes.py
Nodes.add
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
python
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
[ "def", "add", "(", "self", ",", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "Node", ")", ":", "raise", "TypeError", "(", ")", "for", "i", ",", "j", "in", "enumerate", "(", "self", ".", "__nodes", ")", ":", "if", "j", ".", "node_id", "==", "node", ".", "node_id", ":", "self", ".", "__nodes", "[", "i", "]", "=", "node", "return", "self", ".", "__nodes", ".", "append", "(", "node", ")" ]
Add Node, replace existing node if node with node_id is present.
[ "Add", "Node", "replace", "existing", "node", "if", "node", "with", "node_id", "is", "present", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L51-L59
train
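A minimal sketch of the replace-or-append pattern used by Nodes.add, shown on a plain list of objects keyed by an attribute; all names here are illustrative, not pyvlx API:

class Item:
    def __init__(self, ident, name):
        self.ident = ident
        self.name = name

def add_item(items, new):
    for i, existing in enumerate(items):
        if existing.ident == new.ident:
            items[i] = new          # replace in place, keep list order
            return
    items.append(new)               # no match: append at the end

items = [Item(1, 'a'), Item(2, 'b')]
add_item(items, Item(2, 'b2'))      # replaces the entry at index 1
add_item(items, Item(3, 'c'))       # appends a new entry
assert [x.name for x in items] == ['a', 'b2', 'c']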
Julius2342/pyvlx
pyvlx/nodes.py
Nodes.load
async def load(self, node_id=None): """Load nodes from KLF 200, if no node_id is specified all nodes are loaded.""" if node_id is not None: await self._load_node(node_id=node_id) else: await self._load_all_nodes()
python
async def load(self, node_id=None): """Load nodes from KLF 200, if no node_id is specified all nodes are loaded.""" if node_id is not None: await self._load_node(node_id=node_id) else: await self._load_all_nodes()
[ "async", "def", "load", "(", "self", ",", "node_id", "=", "None", ")", ":", "if", "node_id", "is", "not", "None", ":", "await", "self", ".", "_load_node", "(", "node_id", "=", "node_id", ")", "else", ":", "await", "self", ".", "_load_all_nodes", "(", ")" ]
Load nodes from KLF 200, if no node_id is specified all nodes are loaded.
[ "Load", "nodes", "from", "KLF", "200", "if", "no", "node_id", "is", "specified", "all", "nodes", "are", "loaded", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L65-L70
train
Julius2342/pyvlx
pyvlx/nodes.py
Nodes._load_node
async def _load_node(self, node_id): """Load single node via API.""" get_node_information = GetNodeInformation(pyvlx=self.pyvlx, node_id=node_id) await get_node_information.do_api_call() if not get_node_information.success: raise PyVLXException("Unable to retrieve node information") notification_frame = get_node_information.notification_frame node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
python
async def _load_node(self, node_id): """Load single node via API.""" get_node_information = GetNodeInformation(pyvlx=self.pyvlx, node_id=node_id) await get_node_information.do_api_call() if not get_node_information.success: raise PyVLXException("Unable to retrieve node information") notification_frame = get_node_information.notification_frame node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
[ "async", "def", "_load_node", "(", "self", ",", "node_id", ")", ":", "get_node_information", "=", "GetNodeInformation", "(", "pyvlx", "=", "self", ".", "pyvlx", ",", "node_id", "=", "node_id", ")", "await", "get_node_information", ".", "do_api_call", "(", ")", "if", "not", "get_node_information", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to retrieve node information\"", ")", "notification_frame", "=", "get_node_information", ".", "notification_frame", "node", "=", "convert_frame_to_node", "(", "self", ".", "pyvlx", ",", "notification_frame", ")", "if", "node", "is", "not", "None", ":", "self", ".", "add", "(", "node", ")" ]
Load single node via API.
[ "Load", "single", "node", "via", "API", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L72-L81
train
Julius2342/pyvlx
pyvlx/nodes.py
Nodes._load_all_nodes
async def _load_all_nodes(self): """Load all nodes via API.""" get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx) await get_all_nodes_information.do_api_call() if not get_all_nodes_information.success: raise PyVLXException("Unable to retrieve node information") self.clear() for notification_frame in get_all_nodes_information.notification_frames: node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
python
async def _load_all_nodes(self): """Load all nodes via API.""" get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx) await get_all_nodes_information.do_api_call() if not get_all_nodes_information.success: raise PyVLXException("Unable to retrieve node information") self.clear() for notification_frame in get_all_nodes_information.notification_frames: node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
[ "async", "def", "_load_all_nodes", "(", "self", ")", ":", "get_all_nodes_information", "=", "GetAllNodesInformation", "(", "pyvlx", "=", "self", ".", "pyvlx", ")", "await", "get_all_nodes_information", ".", "do_api_call", "(", ")", "if", "not", "get_all_nodes_information", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to retrieve node information\"", ")", "self", ".", "clear", "(", ")", "for", "notification_frame", "in", "get_all_nodes_information", ".", "notification_frames", ":", "node", "=", "convert_frame_to_node", "(", "self", ".", "pyvlx", ",", "notification_frame", ")", "if", "node", "is", "not", "None", ":", "self", ".", "add", "(", "node", ")" ]
Load all nodes via API.
[ "Load", "all", "nodes", "via", "API", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L83-L93
train
Julius2342/pyvlx
pyvlx/get_all_nodes_information.py
GetAllNodesInformation.handle_frame
async def handle_frame(self, frame):
        """Handle incoming API frame, return True if this was the expected frame."""
        if isinstance(frame, FrameGetAllNodesInformationConfirmation):
            self.number_of_nodes = frame.number_of_nodes
            # We are still waiting for FrameGetAllNodesInformationNotification
            return False
        if isinstance(frame, FrameGetAllNodesInformationNotification):
            self.notification_frames.append(frame)
        if isinstance(frame, FrameGetAllNodesInformationFinishedNotification):
            if self.number_of_nodes != len(self.notification_frames):
                PYVLXLOG.warning("Number of received nodes does not match expected number")
            self.success = True
            return True
        return False
python
async def handle_frame(self, frame):
        """Handle incoming API frame, return True if this was the expected frame."""
        if isinstance(frame, FrameGetAllNodesInformationConfirmation):
            self.number_of_nodes = frame.number_of_nodes
            # We are still waiting for FrameGetAllNodesInformationNotification
            return False
        if isinstance(frame, FrameGetAllNodesInformationNotification):
            self.notification_frames.append(frame)
        if isinstance(frame, FrameGetAllNodesInformationFinishedNotification):
            if self.number_of_nodes != len(self.notification_frames):
                PYVLXLOG.warning("Number of received nodes does not match expected number")
            self.success = True
            return True
        return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationConfirmation", ")", ":", "self", ".", "number_of_nodes", "=", "frame", ".", "number_of_nodes", "# We are still waiting for FrameGetAllNodesInformationNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationNotification", ")", ":", "self", ".", "notification_frames", ".", "append", "(", "frame", ")", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationFinishedNotification", ")", ":", "if", "self", ".", "number_of_nodes", "!=", "len", "(", "self", ".", "notification_frames", ")", ":", "PYVLXLOG", ".", "warning", "(", "\"Number of received nodes does not match expected number\"", ")", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_all_nodes_information.py#L21-L34
train
Julius2342/pyvlx
pyvlx/get_node_information.py
GetNodeInformation.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetNodeInformationConfirmation) and frame.node_id == self.node_id: # We are still waiting for GetNodeInformationNotification return False if isinstance(frame, FrameGetNodeInformationNotification) and frame.node_id == self.node_id: self.notification_frame = frame self.success = True return True return False
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetNodeInformationConfirmation) and frame.node_id == self.node_id: # We are still waiting for GetNodeInformationNotification return False if isinstance(frame, FrameGetNodeInformationNotification) and frame.node_id == self.node_id: self.notification_frame = frame self.success = True return True return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetNodeInformationConfirmation", ")", "and", "frame", ".", "node_id", "==", "self", ".", "node_id", ":", "# We are still waiting for GetNodeInformationNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetNodeInformationNotification", ")", "and", "frame", ".", "node_id", "==", "self", ".", "node_id", ":", "self", ".", "notification_frame", "=", "frame", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_node_information.py#L18-L27
train
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.start
def start(self): """Create loop task.""" self.run_task = self.pyvlx.loop.create_task( self.loop())
python
def start(self): """Create loop task.""" self.run_task = self.pyvlx.loop.create_task( self.loop())
[ "def", "start", "(", "self", ")", ":", "self", ".", "run_task", "=", "self", ".", "pyvlx", ".", "loop", ".", "create_task", "(", "self", ".", "loop", "(", ")", ")" ]
Create loop task.
[ "Create", "loop", "task", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L25-L28
train
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.stop
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
python
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
[ "async", "def", "stop", "(", "self", ")", ":", "self", ".", "stopped", "=", "True", "self", ".", "loop_event", ".", "set", "(", ")", "# Waiting for shutdown of loop()", "await", "self", ".", "stopped_event", ".", "wait", "(", ")" ]
Stop heartbeat.
[ "Stop", "heartbeat", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L30-L35
train
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.loop
async def loop(self): """Pulse every timeout seconds until stopped.""" while not self.stopped: self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.loop_timeout) await self.loop_event.wait() if not self.stopped: self.loop_event.clear() await self.pulse() self.cancel_loop_timeout() self.stopped_event.set()
python
async def loop(self): """Pulse every timeout seconds until stopped.""" while not self.stopped: self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.loop_timeout) await self.loop_event.wait() if not self.stopped: self.loop_event.clear() await self.pulse() self.cancel_loop_timeout() self.stopped_event.set()
[ "async", "def", "loop", "(", "self", ")", ":", "while", "not", "self", ".", "stopped", ":", "self", ".", "timeout_handle", "=", "self", ".", "pyvlx", ".", "connection", ".", "loop", ".", "call_later", "(", "self", ".", "timeout_in_seconds", ",", "self", ".", "loop_timeout", ")", "await", "self", ".", "loop_event", ".", "wait", "(", ")", "if", "not", "self", ".", "stopped", ":", "self", ".", "loop_event", ".", "clear", "(", ")", "await", "self", ".", "pulse", "(", ")", "self", ".", "cancel_loop_timeout", "(", ")", "self", ".", "stopped_event", ".", "set", "(", ")" ]
Pulse every timeout seconds until stopped.
[ "Pulse", "every", "timeout", "seconds", "until", "stopped", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L37-L47
train
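A self-contained sketch of the call_later-plus-Event timing pattern in Heartbeat.loop: the timer sets an asyncio.Event after the interval, and the loop body pulses each time the event fires. Names and the fixed beat count are illustrative only:

import asyncio

async def heartbeat(pulse, interval, beats):
    loop = asyncio.get_running_loop()
    event = asyncio.Event()
    for _ in range(beats):
        timer = loop.call_later(interval, event.set)  # wake after interval
        await event.wait()                            # or an external set()
        event.clear()
        await pulse()
        timer.cancel()  # harmless if the timer has already fired

async def demo():
    async def pulse():
        print('pulse')
    await heartbeat(pulse, interval=0.01, beats=3)

asyncio.run(demo())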
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.pulse
async def pulse(self): """Send get state request to API to keep the connection alive.""" get_state = GetState(pyvlx=self.pyvlx) await get_state.do_api_call() if not get_state.success: raise PyVLXException("Unable to send get state.")
python
async def pulse(self): """Send get state request to API to keep the connection alive.""" get_state = GetState(pyvlx=self.pyvlx) await get_state.do_api_call() if not get_state.success: raise PyVLXException("Unable to send get state.")
[ "async", "def", "pulse", "(", "self", ")", ":", "get_state", "=", "GetState", "(", "pyvlx", "=", "self", ".", "pyvlx", ")", "await", "get_state", ".", "do_api_call", "(", ")", "if", "not", "get_state", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to send get state.\"", ")" ]
Send get state request to API to keep the connection alive.
[ "Send", "get", "state", "request", "to", "API", "to", "keep", "the", "connection", "alive", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L59-L64
train
Julius2342/pyvlx
pyvlx/frames/frame_get_state.py
FrameGetStateConfirmation.get_payload
def get_payload(self):
        """Return Payload."""
        payload = bytes([self.gateway_state.value, self.gateway_sub_state.value])
        payload += bytes(4)  # State data, reserved for future use
        return payload
python
def get_payload(self):
        """Return Payload."""
        payload = bytes([self.gateway_state.value, self.gateway_sub_state.value])
        payload += bytes(4)  # State data, reserved for future use
        return payload
[ "def", "get_payload", "(", "self", ")", ":", "payload", "=", "bytes", "(", "[", "self", ".", "gateway_state", ".", "value", ",", "self", ".", "gateway_sub_state", ".", "value", "]", ")", "payload", "+=", "bytes", "(", "4", ")", "# State data, reserved for future use", "return", "payload" ]
Return Payload.
[ "Return", "Payload", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_state.py#L53-L57
train
Julius2342/pyvlx
pyvlx/frames/frame_get_state.py
FrameGetStateConfirmation.from_payload
def from_payload(self, payload): """Init frame from binary data.""" self.gateway_state = GatewayState(payload[0]) self.gateway_sub_state = GatewaySubState(payload[1])
python
def from_payload(self, payload): """Init frame from binary data.""" self.gateway_state = GatewayState(payload[0]) self.gateway_sub_state = GatewaySubState(payload[1])
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "gateway_state", "=", "GatewayState", "(", "payload", "[", "0", "]", ")", "self", ".", "gateway_sub_state", "=", "GatewaySubState", "(", "payload", "[", "1", "]", ")" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_state.py#L59-L62
train
Julius2342/pyvlx
pyvlx/string_helper.py
string_to_bytes
def string_to_bytes(string, size):
    """Convert string to bytes, adding padding."""
    if len(string) > size:
        raise PyVLXException("string_to_bytes::string_too_large")
    encoded = bytes(string, encoding='utf-8')
    return encoded + bytes(size-len(encoded))
python
def string_to_bytes(string, size):
    """Convert string to bytes, adding padding."""
    if len(string) > size:
        raise PyVLXException("string_to_bytes::string_too_large")
    encoded = bytes(string, encoding='utf-8')
    return encoded + bytes(size-len(encoded))
[ "def", "string_to_bytes", "(", "string", ",", "size", ")", ":", "if", "len", "(", "string", ")", ">", "size", ":", "raise", "PyVLXException", "(", "\"string_to_bytes::string_too_large\"", ")", "encoded", "=", "bytes", "(", "string", ",", "encoding", "=", "'utf-8'", ")", "return", "encoded", "+", "bytes", "(", "size", "-", "len", "(", "encoded", ")", ")" ]
Convert string to bytes, adding padding.
[ "Convert", "string", "to", "bytes", ",", "adding", "padding", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/string_helper.py#L5-L10
train
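A dependency-free restatement of the padding contract above, with the PyVLXException swapped for ValueError. One observation: the original checks len(string) on the str but encodes UTF-8, so a multi-byte string can still overflow the buffer and make bytes(size - len(encoded)) raise; this sketch checks the encoded length instead:

def pad(string, size):
    encoded = string.encode('utf-8')
    if len(encoded) > size:          # checked on the encoded length here
        raise ValueError('string too large')
    return encoded + bytes(size - len(encoded))

assert pad('Bath', 8) == b'Bath\x00\x00\x00\x00'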
Julius2342/pyvlx
pyvlx/string_helper.py
bytes_to_string
def bytes_to_string(raw): """Convert bytes to string.""" ret = bytes() for byte in raw: if byte == 0x00: return ret.decode("utf-8") ret += bytes([byte]) return ret.decode("utf-8")
python
def bytes_to_string(raw): """Convert bytes to string.""" ret = bytes() for byte in raw: if byte == 0x00: return ret.decode("utf-8") ret += bytes([byte]) return ret.decode("utf-8")
[ "def", "bytes_to_string", "(", "raw", ")", ":", "ret", "=", "bytes", "(", ")", "for", "byte", "in", "raw", ":", "if", "byte", "==", "0x00", ":", "return", "ret", ".", "decode", "(", "\"utf-8\"", ")", "ret", "+=", "bytes", "(", "[", "byte", "]", ")", "return", "ret", ".", "decode", "(", "\"utf-8\"", ")" ]
Convert bytes to string.
[ "Convert", "bytes", "to", "string", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/string_helper.py#L13-L20
train
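The inverse operation stops at the first NUL byte, undoing the padding added by string_to_bytes; a minimal round-trip check:

def unpad(raw):
    # equivalent one-liner: raw.split(b'\x00', 1)[0].decode('utf-8')
    out = bytearray()
    for byte in raw:
        if byte == 0x00:
            break
        out.append(byte)
    return out.decode('utf-8')

assert unpad(b'Bath\x00\x00\x00\x00') == 'Bath'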
Julius2342/pyvlx
pyvlx/frames/frame_node_state_position_changed_notification.py
FrameNodeStatePositionChangedNotification.get_payload
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += bytes([self.state]) payload += bytes(self.current_position.raw) payload += bytes(self.target.raw) payload += bytes(self.current_position_fp1.raw) payload += bytes(self.current_position_fp2.raw) payload += bytes(self.current_position_fp3.raw) payload += bytes(self.current_position_fp4.raw) payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255]) payload += struct.pack(">I", self.timestamp) return payload
python
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += bytes([self.state]) payload += bytes(self.current_position.raw) payload += bytes(self.target.raw) payload += bytes(self.current_position_fp1.raw) payload += bytes(self.current_position_fp2.raw) payload += bytes(self.current_position_fp3.raw) payload += bytes(self.current_position_fp4.raw) payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255]) payload += struct.pack(">I", self.timestamp) return payload
[ "def", "get_payload", "(", "self", ")", ":", "payload", "=", "bytes", "(", "[", "self", ".", "node_id", "]", ")", "payload", "+=", "bytes", "(", "[", "self", ".", "state", "]", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "target", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp1", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp2", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp3", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp4", ".", "raw", ")", "payload", "+=", "bytes", "(", "[", "self", ".", "remaining_time", ">>", "8", "&", "255", ",", "self", ".", "remaining_time", "&", "255", "]", ")", "payload", "+=", "struct", ".", "pack", "(", "\">I\"", ",", "self", ".", "timestamp", ")", "return", "payload" ]
Return Payload.
[ "Return", "Payload", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_node_state_position_changed_notification.py#L30-L42
train
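The hand-rolled two-byte remaining_time encoding above is a big-endian 16-bit split, matching struct's '>H' format just as the timestamp uses '>I'; a quick check with illustrative values:

import struct

remaining_time = 0x1234
assert bytes([remaining_time >> 8 & 255, remaining_time & 255]) == struct.pack('>H', remaining_time)

timestamp = 1500000000
assert struct.pack('>I', timestamp) == bytes([0x59, 0x68, 0x2F, 0x00])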
Julius2342/pyvlx
pyvlx/frames/frame_node_state_position_changed_notification.py
FrameNodeStatePositionChangedNotification.from_payload
def from_payload(self, payload): """Init frame from binary data.""" self.node_id = payload[0] self.state = payload[1] self.current_position = Parameter(payload[2:4]) self.target = Parameter(payload[4:6]) self.current_position_fp1 = Parameter(payload[6:8]) self.current_position_fp2 = Parameter(payload[8:10]) self.current_position_fp3 = Parameter(payload[10:12]) self.current_position_fp4 = Parameter(payload[12:14]) self.remaining_time = payload[14] * 256 + payload[15] # @VELUX: looks like your timestamp is wrong. Looks like # you are only transmitting the two lower bytes. self.timestamp = struct.unpack(">I", payload[16:20])[0]
python
def from_payload(self, payload): """Init frame from binary data.""" self.node_id = payload[0] self.state = payload[1] self.current_position = Parameter(payload[2:4]) self.target = Parameter(payload[4:6]) self.current_position_fp1 = Parameter(payload[6:8]) self.current_position_fp2 = Parameter(payload[8:10]) self.current_position_fp3 = Parameter(payload[10:12]) self.current_position_fp4 = Parameter(payload[12:14]) self.remaining_time = payload[14] * 256 + payload[15] # @VELUX: looks like your timestamp is wrong. Looks like # you are only transmitting the two lower bytes. self.timestamp = struct.unpack(">I", payload[16:20])[0]
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "node_id", "=", "payload", "[", "0", "]", "self", ".", "state", "=", "payload", "[", "1", "]", "self", ".", "current_position", "=", "Parameter", "(", "payload", "[", "2", ":", "4", "]", ")", "self", ".", "target", "=", "Parameter", "(", "payload", "[", "4", ":", "6", "]", ")", "self", ".", "current_position_fp1", "=", "Parameter", "(", "payload", "[", "6", ":", "8", "]", ")", "self", ".", "current_position_fp2", "=", "Parameter", "(", "payload", "[", "8", ":", "10", "]", ")", "self", ".", "current_position_fp3", "=", "Parameter", "(", "payload", "[", "10", ":", "12", "]", ")", "self", ".", "current_position_fp4", "=", "Parameter", "(", "payload", "[", "12", ":", "14", "]", ")", "self", ".", "remaining_time", "=", "payload", "[", "14", "]", "*", "256", "+", "payload", "[", "15", "]", "# @VELUX: looks like your timestamp is wrong. Looks like", "# you are only transmitting the two lower bytes.", "self", ".", "timestamp", "=", "struct", ".", "unpack", "(", "\">I\"", ",", "payload", "[", "16", ":", "20", "]", ")", "[", "0", "]" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_node_state_position_changed_notification.py#L44-L57
train
Julius2342/pyvlx
pyvlx/house_status_monitor.py
house_status_monitor_enable
async def house_status_monitor_enable(pyvlx):
    """Enable house status monitor."""
    status_monitor_enable = HouseStatusMonitorEnable(pyvlx=pyvlx)
    await status_monitor_enable.do_api_call()
    if not status_monitor_enable.success:
        raise PyVLXException("Unable to enable house status monitor.")
python
async def house_status_monitor_enable(pyvlx):
    """Enable house status monitor."""
    status_monitor_enable = HouseStatusMonitorEnable(pyvlx=pyvlx)
    await status_monitor_enable.do_api_call()
    if not status_monitor_enable.success:
        raise PyVLXException("Unable to enable house status monitor.")
[ "async", "def", "house_status_monitor_enable", "(", "pyvlx", ")", ":", "status_monitor_enable", "=", "HouseStatusMonitorEnable", "(", "pyvlx", "=", "pyvlx", ")", "await", "status_monitor_enable", ".", "do_api_call", "(", ")", "if", "not", "status_monitor_enable", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to enable house status monitor.\"", ")" ]
Enable house status monitor.
[ "Enable", "house", "status", "monitor", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L51-L56
train
Julius2342/pyvlx
pyvlx/house_status_monitor.py
house_status_monitor_disable
async def house_status_monitor_disable(pyvlx):
    """Disable house status monitor."""
    status_monitor_disable = HouseStatusMonitorDisable(pyvlx=pyvlx)
    await status_monitor_disable.do_api_call()
    if not status_monitor_disable.success:
        raise PyVLXException("Unable to disable house status monitor.")
python
async def house_status_monitor_disable(pyvlx):
    """Disable house status monitor."""
    status_monitor_disable = HouseStatusMonitorDisable(pyvlx=pyvlx)
    await status_monitor_disable.do_api_call()
    if not status_monitor_disable.success:
        raise PyVLXException("Unable to disable house status monitor.")
[ "async", "def", "house_status_monitor_disable", "(", "pyvlx", ")", ":", "status_monitor_disable", "=", "HouseStatusMonitorDisable", "(", "pyvlx", "=", "pyvlx", ")", "await", "status_monitor_disable", ".", "do_api_call", "(", ")", "if", "not", "status_monitor_disable", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to disable house status monitor.\"", ")" ]
Disable house status monitor.
[ "Disable", "house", "status", "monitor", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L59-L64
train
Julius2342/pyvlx
pyvlx/house_status_monitor.py
HouseStatusMonitorEnable.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorEnableConfirmation): return False self.success = True return True
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorEnableConfirmation): return False self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FrameHouseStatusMonitorEnableConfirmation", ")", ":", "return", "False", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L19-L24
train
Julius2342/pyvlx
pyvlx/house_status_monitor.py
HouseStatusMonitorDisable.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FrameHouseStatusMonitorDisableConfirmation", ")", ":", "return", "False", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L39-L44
train
tbielawa/bitmath
bitmath/integrations.py
BitmathType
def BitmathType(bmstring): """An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object """ try: argvalue = bitmath.parse_string(bmstring) except ValueError: raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring) else: return argvalue
python
def BitmathType(bmstring): """An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object """ try: argvalue = bitmath.parse_string(bmstring) except ValueError: raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring) else: return argvalue
[ "def", "BitmathType", "(", "bmstring", ")", ":", "try", ":", "argvalue", "=", "bitmath", ".", "parse_string", "(", "bmstring", ")", "except", "ValueError", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"'%s' can not be parsed into a valid bitmath object\"", "%", "bmstring", ")", "else", ":", "return", "argvalue" ]
An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object
[ "An", "argument", "type", "for", "integrations", "with", "the", "argparse", "module", "." ]
58ad3ac5f076cc6e53f36a91af055c6028c850a5
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L33-L80
train
tbielawa/bitmath
bitmath/integrations.py
BitmathFileTransferSpeed.update
def update(self, pbar):
        """Updates the widget with the current NIST/SI speed.

        Basically, this calculates the average transfer rate and
        chooses a "pretty" prefix unit for it"""
        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:
            scaled = bitmath.Byte()
        else:
            speed = pbar.currval / pbar.seconds_elapsed
            scaled = bitmath.Byte(speed).best_prefix(system=self.system)
        return scaled.format(self.format)
python
def update(self, pbar):
        """Updates the widget with the current NIST/SI speed.

        Basically, this calculates the average transfer rate and
        chooses a "pretty" prefix unit for it"""
        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6:
            scaled = bitmath.Byte()
        else:
            speed = pbar.currval / pbar.seconds_elapsed
            scaled = bitmath.Byte(speed).best_prefix(system=self.system)
        return scaled.format(self.format)
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "seconds_elapsed", "<", "2e-6", "or", "pbar", ".", "currval", "<", "2e-6", ":", "scaled", "=", "bitmath", ".", "Byte", "(", ")", "else", ":", "speed", "=", "pbar", ".", "currval", "/", "pbar", ".", "seconds_elapsed", "scaled", "=", "bitmath", ".", "Byte", "(", "speed", ")", ".", "best_prefix", "(", "system", "=", "self", ".", "system", ")", "return", "scaled", ".", "format", "(", "self", ".", "format", ")" ]
Updates the widget with the current NIST/SI speed.

Basically, this calculates the average transfer rate and
chooses a "pretty" prefix unit for it
[ "Updates", "the", "widget", "with", "the", "current", "NIST", "/", "SI", "speed", "." ]
58ad3ac5f076cc6e53f36a91af055c6028c850a5
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L92-L104
train
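The scaling step relies on bitmath's best_prefix; a small check of that behavior (requires the bitmath package; the format string follows bitmath's documented mini-language):

import bitmath

speed = bitmath.Byte(1048576).best_prefix(system=bitmath.NIST)
print(speed.format('{value:.2f} {unit}/s'))  # -> 1.00 MiB/s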
Julius2342/pyvlx
pyvlx/command_send.py
CommandSend.handle_frame
async def handle_frame(self, frame):
        """Handle incoming API frame, return True if this was the expected frame."""
        if isinstance(frame, FrameCommandSendConfirmation) and frame.session_id == self.session_id:
            if frame.status == CommandSendConfirmationStatus.ACCEPTED:
                self.success = True
            return not self.wait_for_completion
        if isinstance(frame, FrameCommandRemainingTimeNotification) and frame.session_id == self.session_id:
            # Ignoring FrameCommandRemainingTimeNotification
            return False
        if isinstance(frame, FrameCommandRunStatusNotification) and frame.session_id == self.session_id:
            # At the moment I don't really understand what the FrameCommandRunStatusNotification is good for.
            # Ignoring these packets for now
            return False
        if isinstance(frame, FrameSessionFinishedNotification) and frame.session_id == self.session_id:
            return True
        return False
python
async def handle_frame(self, frame):
        """Handle incoming API frame, return True if this was the expected frame."""
        if isinstance(frame, FrameCommandSendConfirmation) and frame.session_id == self.session_id:
            if frame.status == CommandSendConfirmationStatus.ACCEPTED:
                self.success = True
            return not self.wait_for_completion
        if isinstance(frame, FrameCommandRemainingTimeNotification) and frame.session_id == self.session_id:
            # Ignoring FrameCommandRemainingTimeNotification
            return False
        if isinstance(frame, FrameCommandRunStatusNotification) and frame.session_id == self.session_id:
            # At the moment I don't really understand what the FrameCommandRunStatusNotification is good for.
            # Ignoring these packets for now
            return False
        if isinstance(frame, FrameSessionFinishedNotification) and frame.session_id == self.session_id:
            return True
        return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameCommandSendConfirmation", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "if", "frame", ".", "status", "==", "CommandSendConfirmationStatus", ".", "ACCEPTED", ":", "self", ".", "success", "=", "True", "return", "not", "self", ".", "wait_for_completion", "if", "isinstance", "(", "frame", ",", "FrameCommandRemainingTimeNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "# Ignoring FrameCommandRemainingTimeNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameCommandRunStatusNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "# At the moment I don't really understand what the FrameCommandRunStatusNotification is good for.", "# Ignoring these packets for now", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameSessionFinishedNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/command_send.py#L22-L37
train
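The shared contract across the handle_frame entries in this section: return False while more frames are expected, True once the exchange is complete; with wait_for_completion=True the session-finished notification is the terminal frame. A toy driver illustrating that contract (a hypothetical helper, not pyvlx API):

async def drive(handler, incoming_frames):
    # 'handler' is any object exposing the async handle_frame(frame) API
    # shown in these records; 'incoming_frames' is an iterable of frames.
    for frame in incoming_frames:
        if await handler.handle_frame(frame):
            return True   # the terminal frame for this exchange was seen
    return False          # stream ended without completing the exchange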
Julius2342/pyvlx
pyvlx/command_send.py
CommandSend.request_frame
def request_frame(self): """Construct initiating frame.""" self.session_id = get_new_session_id() return FrameCommandSendRequest(node_ids=[self.node_id], parameter=self.parameter, session_id=self.session_id)
python
def request_frame(self): """Construct initiating frame.""" self.session_id = get_new_session_id() return FrameCommandSendRequest(node_ids=[self.node_id], parameter=self.parameter, session_id=self.session_id)
[ "def", "request_frame", "(", "self", ")", ":", "self", ".", "session_id", "=", "get_new_session_id", "(", ")", "return", "FrameCommandSendRequest", "(", "node_ids", "=", "[", "self", ".", "node_id", "]", ",", "parameter", "=", "self", ".", "parameter", ",", "session_id", "=", "self", ".", "session_id", ")" ]
Construct initiating frame.
[ "Construct", "initiating", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/command_send.py#L39-L42
train
Julius2342/pyvlx
pyvlx/config.py
Config.read_config
def read_config(self, path): """Read configuration file.""" PYVLXLOG.info('Reading config file: %s', path) try: with open(path, 'r') as filehandle: doc = yaml.safe_load(filehandle) self.test_configuration(doc, path) self.host = doc['config']['host'] self.password = doc['config']['password'] if 'port' in doc['config']: self.port = doc['config']['port'] except FileNotFoundError as ex: raise PyVLXException('file does not exist: {0}'.format(ex))
python
def read_config(self, path): """Read configuration file.""" PYVLXLOG.info('Reading config file: %s', path) try: with open(path, 'r') as filehandle: doc = yaml.safe_load(filehandle) self.test_configuration(doc, path) self.host = doc['config']['host'] self.password = doc['config']['password'] if 'port' in doc['config']: self.port = doc['config']['port'] except FileNotFoundError as ex: raise PyVLXException('file does not exist: {0}'.format(ex))
[ "def", "read_config", "(", "self", ",", "path", ")", ":", "PYVLXLOG", ".", "info", "(", "'Reading config file: %s'", ",", "path", ")", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "filehandle", ":", "doc", "=", "yaml", ".", "safe_load", "(", "filehandle", ")", "self", ".", "test_configuration", "(", "doc", ",", "path", ")", "self", ".", "host", "=", "doc", "[", "'config'", "]", "[", "'host'", "]", "self", ".", "password", "=", "doc", "[", "'config'", "]", "[", "'password'", "]", "if", "'port'", "in", "doc", "[", "'config'", "]", ":", "self", ".", "port", "=", "doc", "[", "'config'", "]", "[", "'port'", "]", "except", "FileNotFoundError", "as", "ex", ":", "raise", "PyVLXException", "(", "'file does not exist: {0}'", ".", "format", "(", "ex", ")", ")" ]
Read configuration file.
[ "Read", "configuration", "file", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/config.py#L22-L34
train
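A minimal pyvlx.yaml matching the keys read above; host and password reuse the placeholder values from the demo record, and the optional port value is illustrative:

import yaml

doc = yaml.safe_load("""
config:
    host: "192.168.2.127"
    password: "velux123"
    port: 51200
""")
assert doc['config']['host'] == '192.168.2.127'
assert 'port' in doc['config']  # optional key, present in this example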
pyQode/pyqode.qt
pyqode/qt/__init__.py
setup_apiv2
def setup_apiv2(): """ Setup apiv2 when using PyQt4 and Python2. """ # setup PyQt api to version 2 if sys.version_info[0] == 2: logging.getLogger(__name__).debug( 'setting up SIP API to version 2') import sip try: sip.setapi("QString", 2) sip.setapi("QVariant", 2) except ValueError: logging.getLogger(__name__).critical( "failed to set up sip api to version 2 for PyQt4") raise ImportError('PyQt4')
python
def setup_apiv2(): """ Setup apiv2 when using PyQt4 and Python2. """ # setup PyQt api to version 2 if sys.version_info[0] == 2: logging.getLogger(__name__).debug( 'setting up SIP API to version 2') import sip try: sip.setapi("QString", 2) sip.setapi("QVariant", 2) except ValueError: logging.getLogger(__name__).critical( "failed to set up sip api to version 2 for PyQt4") raise ImportError('PyQt4')
[ "def", "setup_apiv2", "(", ")", ":", "# setup PyQt api to version 2", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'setting up SIP API to version 2'", ")", "import", "sip", "try", ":", "sip", ".", "setapi", "(", "\"QString\"", ",", "2", ")", "sip", ".", "setapi", "(", "\"QVariant\"", ",", "2", ")", "except", "ValueError", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "critical", "(", "\"failed to set up sip api to version 2 for PyQt4\"", ")", "raise", "ImportError", "(", "'PyQt4'", ")" ]
Setup apiv2 when using PyQt4 and Python2.
[ "Setup", "apiv2", "when", "using", "PyQt4", "and", "Python2", "." ]
56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd
https://github.com/pyQode/pyqode.qt/blob/56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd/pyqode/qt/__init__.py#L79-L94
train
pyQode/pyqode.qt
pyqode/qt/__init__.py
autodetect
def autodetect():
    """
    Auto-detects and uses the first available QT_API by importing them in the
    following order:

    1) PyQt5

    2) PyQt4

    3) PySide
    """
    logging.getLogger(__name__).debug('auto-detecting QT_API')
    try:
        logging.getLogger(__name__).debug('trying PyQt5')
        import PyQt5
        os.environ[QT_API] = PYQT5_API[0]
        logging.getLogger(__name__).debug('imported PyQt5')
    except ImportError:
        try:
            logging.getLogger(__name__).debug('trying PyQt4')
            setup_apiv2()
            import PyQt4
            os.environ[QT_API] = PYQT4_API[0]
            logging.getLogger(__name__).debug('imported PyQt4')
        except ImportError:
            try:
                logging.getLogger(__name__).debug('trying PySide')
                import PySide
                os.environ[QT_API] = PYSIDE_API[0]
                logging.getLogger(__name__).debug('imported PySide')
            except ImportError:
                raise PythonQtError('No Qt bindings could be found')
python
def autodetect():
    """
    Auto-detects and uses the first available QT_API by importing them in the
    following order:

    1) PyQt5

    2) PyQt4

    3) PySide
    """
    logging.getLogger(__name__).debug('auto-detecting QT_API')
    try:
        logging.getLogger(__name__).debug('trying PyQt5')
        import PyQt5
        os.environ[QT_API] = PYQT5_API[0]
        logging.getLogger(__name__).debug('imported PyQt5')
    except ImportError:
        try:
            logging.getLogger(__name__).debug('trying PyQt4')
            setup_apiv2()
            import PyQt4
            os.environ[QT_API] = PYQT4_API[0]
            logging.getLogger(__name__).debug('imported PyQt4')
        except ImportError:
            try:
                logging.getLogger(__name__).debug('trying PySide')
                import PySide
                os.environ[QT_API] = PYSIDE_API[0]
                logging.getLogger(__name__).debug('imported PySide')
            except ImportError:
                raise PythonQtError('No Qt bindings could be found')
[ "def", "autodetect", "(", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'auto-detecting QT_API'", ")", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PyQt5'", ")", "import", "PyQt5", "os", ".", "environ", "[", "QT_API", "]", "=", "PYQT5_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PyQt5'", ")", "except", "ImportError", ":", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PyQt4'", ")", "setup_apiv2", "(", ")", "import", "PyQt4", "os", ".", "environ", "[", "QT_API", "]", "=", "PYQT4_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PyQt4'", ")", "except", "ImportError", ":", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PySide'", ")", "import", "PySide", "os", ".", "environ", "[", "QT_API", "]", "=", "PYSIDE_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PySide'", ")", "except", "ImportError", ":", "raise", "PythonQtError", "(", "'No Qt bindings could be found'", ")" ]
Auto-detects and uses the first available QT_API by importing them in the
following order:

1) PyQt5

2) PyQt4

3) PySide
[ "Auto", "-", "detects", "and", "uses", "the", "first", "available", "QT_API", "by", "importing", "them", "in", "the", "following", "order", ":" ]
56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd
https://github.com/pyQode/pyqode.qt/blob/56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd/pyqode/qt/__init__.py#L97-L126
train
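The same try-import fallback chain, reduced to its core; the 'QT_API' key and the api strings below are illustrative stand-ins for the module's constants, and the sip API-v2 setup step before the PyQt4 import is omitted:

import importlib
import os

def pick_binding():
    for module, api in (('PyQt5', 'pyqt5'), ('PyQt4', 'pyqt4'), ('PySide', 'pyside')):
        try:
            importlib.import_module(module)
        except ImportError:
            continue
        os.environ['QT_API'] = api   # record the chosen binding
        return api
    raise ImportError('No Qt bindings could be found')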
Julius2342/pyvlx
old_api/pyvlx/rollershutter.py
RollerShutter.from_config
def from_config(cls, pyvlx, item): """Read roller shutter from config.""" name = item['name'] ident = item['id'] subtype = item['subtype'] typeid = item['typeId'] return cls(pyvlx, ident, name, subtype, typeid)
python
def from_config(cls, pyvlx, item): """Read roller shutter from config.""" name = item['name'] ident = item['id'] subtype = item['subtype'] typeid = item['typeId'] return cls(pyvlx, ident, name, subtype, typeid)
[ "def", "from_config", "(", "cls", ",", "pyvlx", ",", "item", ")", ":", "name", "=", "item", "[", "'name'", "]", "ident", "=", "item", "[", "'id'", "]", "subtype", "=", "item", "[", "'subtype'", "]", "typeid", "=", "item", "[", "'typeId'", "]", "return", "cls", "(", "pyvlx", ",", "ident", ",", "name", ",", "subtype", ",", "typeid", ")" ]
Read roller shutter from config.
[ "Read", "roller", "shutter", "from", "config", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/rollershutter.py#L19-L25
train
Julius2342/pyvlx
pyvlx/get_scene_list.py
GetSceneList.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetSceneListConfirmation): self.count_scenes = frame.count_scenes if self.count_scenes == 0: self.success = True return True # We are still waiting for FrameGetSceneListNotification(s) return False if isinstance(frame, FrameGetSceneListNotification): self.scenes.extend(frame.scenes) if frame.remaining_scenes != 0: # We are still waiting for FrameGetSceneListConfirmation(s) return False if self.count_scenes != len(self.scenes): PYVLXLOG.warning("Warning: number of received scenes does not match expected number") self.success = True return True return False
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetSceneListConfirmation): self.count_scenes = frame.count_scenes if self.count_scenes == 0: self.success = True return True # We are still waiting for FrameGetSceneListNotification(s) return False if isinstance(frame, FrameGetSceneListNotification): self.scenes.extend(frame.scenes) if frame.remaining_scenes != 0: # We are still waiting for FrameGetSceneListConfirmation(s) return False if self.count_scenes != len(self.scenes): PYVLXLOG.warning("Warning: number of received scenes does not match expected number") self.success = True return True return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetSceneListConfirmation", ")", ":", "self", ".", "count_scenes", "=", "frame", ".", "count_scenes", "if", "self", ".", "count_scenes", "==", "0", ":", "self", ".", "success", "=", "True", "return", "True", "# We are still waiting for FrameGetSceneListNotification(s)", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetSceneListNotification", ")", ":", "self", ".", "scenes", ".", "extend", "(", "frame", ".", "scenes", ")", "if", "frame", ".", "remaining_scenes", "!=", "0", ":", "# We are still waiting for FrameGetSceneListConfirmation(s)", "return", "False", "if", "self", ".", "count_scenes", "!=", "len", "(", "self", ".", "scenes", ")", ":", "PYVLXLOG", ".", "warning", "(", "\"Warning: number of received scenes does not match expected number\"", ")", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_scene_list.py#L19-L37
train
Julius2342/pyvlx
pyvlx/frames/frame_get_scene_list.py
FrameGetSceneListNotification.get_payload
def get_payload(self): """Return Payload.""" ret = bytes([len(self.scenes)]) for number, name in self.scenes: ret += bytes([number]) ret += string_to_bytes(name, 64) ret += bytes([self.remaining_scenes]) return ret
python
def get_payload(self): """Return Payload.""" ret = bytes([len(self.scenes)]) for number, name in self.scenes: ret += bytes([number]) ret += string_to_bytes(name, 64) ret += bytes([self.remaining_scenes]) return ret
[ "def", "get_payload", "(", "self", ")", ":", "ret", "=", "bytes", "(", "[", "len", "(", "self", ".", "scenes", ")", "]", ")", "for", "number", ",", "name", "in", "self", ".", "scenes", ":", "ret", "+=", "bytes", "(", "[", "number", "]", ")", "ret", "+=", "string_to_bytes", "(", "name", ",", "64", ")", "ret", "+=", "bytes", "(", "[", "self", ".", "remaining_scenes", "]", ")", "return", "ret" ]
Return Payload.
[ "Return", "Payload", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_scene_list.py#L51-L58
train
Julius2342/pyvlx
pyvlx/frames/frame_get_scene_list.py
FrameGetSceneListNotification.from_payload
def from_payload(self, payload): """Init frame from binary data.""" number_of_objects = payload[0] self.remaining_scenes = payload[-1] predicted_len = number_of_objects * 65 + 2 if len(payload) != predicted_len: raise PyVLXException('scene_list_notification_wrong_length') self.scenes = [] for i in range(number_of_objects): scene = payload[(i*65+1):(i*65+66)] number = scene[0] name = bytes_to_string(scene[1:]) self.scenes.append((number, name))
python
def from_payload(self, payload): """Init frame from binary data.""" number_of_objects = payload[0] self.remaining_scenes = payload[-1] predicted_len = number_of_objects * 65 + 2 if len(payload) != predicted_len: raise PyVLXException('scene_list_notification_wrong_length') self.scenes = [] for i in range(number_of_objects): scene = payload[(i*65+1):(i*65+66)] number = scene[0] name = bytes_to_string(scene[1:]) self.scenes.append((number, name))
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "number_of_objects", "=", "payload", "[", "0", "]", "self", ".", "remaining_scenes", "=", "payload", "[", "-", "1", "]", "predicted_len", "=", "number_of_objects", "*", "65", "+", "2", "if", "len", "(", "payload", ")", "!=", "predicted_len", ":", "raise", "PyVLXException", "(", "'scene_list_notification_wrong_length'", ")", "self", ".", "scenes", "=", "[", "]", "for", "i", "in", "range", "(", "number_of_objects", ")", ":", "scene", "=", "payload", "[", "(", "i", "*", "65", "+", "1", ")", ":", "(", "i", "*", "65", "+", "66", ")", "]", "number", "=", "scene", "[", "0", "]", "name", "=", "bytes_to_string", "(", "scene", "[", "1", ":", "]", ")", "self", ".", "scenes", ".", "append", "(", "(", "number", ",", "name", ")", ")" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_scene_list.py#L60-L72
train
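The parsing direction as a matching standalone sketch; parse_payload mirrors from_payload above, doing by hand the NUL-stripping that bytes_to_string is assumed to perform:

def parse_payload(payload):
    count = payload[0]
    if len(payload) != count * 65 + 2:  # same length check as from_payload
        raise ValueError('scene_list_notification_wrong_length')
    scenes = []
    for i in range(count):
        chunk = payload[i * 65 + 1:i * 65 + 66]
        scenes.append((chunk[0], chunk[1:].rstrip(b'\x00').decode('utf-8')))
    return scenes, payload[-1]

demo = bytes([1, 3]) + b'Night' + bytes(59) + bytes([0])
assert parse_payload(demo) == ([(3, 'Night')], 0)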
spacetelescope/synphot_refactor
synphot/specio.py
read_remote_spec
def read_remote_spec(filename, encoding='binary', cache=True, show_progress=True, **kwargs): """Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ with get_readable_fileobj(filename, encoding=encoding, cache=cache, show_progress=show_progress) as fd: header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs) return header, wavelengths, fluxes
python
def read_remote_spec(filename, encoding='binary', cache=True, show_progress=True, **kwargs): """Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ with get_readable_fileobj(filename, encoding=encoding, cache=cache, show_progress=show_progress) as fd: header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs) return header, wavelengths, fluxes
[ "def", "read_remote_spec", "(", "filename", ",", "encoding", "=", "'binary'", ",", "cache", "=", "True", ",", "show_progress", "=", "True", ",", "*", "*", "kwargs", ")", ":", "with", "get_readable_fileobj", "(", "filename", ",", "encoding", "=", "encoding", ",", "cache", "=", "cache", ",", "show_progress", "=", "show_progress", ")", "as", "fd", ":", "header", ",", "wavelengths", ",", "fluxes", "=", "read_spec", "(", "fd", ",", "fname", "=", "filename", ",", "*", "*", "kwargs", ")", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum.
[ "Read", "FITS", "or", "ASCII", "spectrum", "from", "a", "remote", "location", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L26-L55
train
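A usage sketch with a placeholder URL; any FITS or ASCII spectrum reachable over HTTP/FTP works the same way, and cache=True keeps a local copy in astropy's download cache:

from synphot import specio

header, wavelengths, fluxes = specio.read_remote_spec(
    'http://example.com/spectra/alpha_lyr.fits', cache=True,
    show_progress=False)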
spacetelescope/synphot_refactor
synphot/specio.py
read_spec
def read_spec(filename, fname='', **kwargs): """Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. """ if isinstance(filename, str): fname = filename elif not fname: # pragma: no cover raise exceptions.SynphotError('Cannot determine filename.') if fname.endswith('fits') or fname.endswith('fit'): read_func = read_fits_spec else: read_func = read_ascii_spec return read_func(filename, **kwargs)
python
def read_spec(filename, fname='', **kwargs): """Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. """ if isinstance(filename, str): fname = filename elif not fname: # pragma: no cover raise exceptions.SynphotError('Cannot determine filename.') if fname.endswith('fits') or fname.endswith('fit'): read_func = read_fits_spec else: read_func = read_ascii_spec return read_func(filename, **kwargs)
[ "def", "read_spec", "(", "filename", ",", "fname", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "fname", "=", "filename", "elif", "not", "fname", ":", "# pragma: no cover", "raise", "exceptions", ".", "SynphotError", "(", "'Cannot determine filename.'", ")", "if", "fname", ".", "endswith", "(", "'fits'", ")", "or", "fname", ".", "endswith", "(", "'fit'", ")", ":", "read_func", "=", "read_fits_spec", "else", ":", "read_func", "=", "read_ascii_spec", "return", "read_func", "(", "filename", ",", "*", "*", "kwargs", ")" ]
Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed.
[ "Read", "FITS", "or", "ASCII", "spectrum", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L58-L97
train
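A usage sketch ('demo_spec.fits' is a placeholder path): dispatch is by filename suffix, so *.fits and *.fit are routed to read_fits_spec and everything else to read_ascii_spec:

from synphot import specio

header, wavelengths, fluxes = specio.read_spec('demo_spec.fits')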
spacetelescope/synphot_refactor
synphot/specio.py
read_ascii_spec
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs): """Read ASCII spectrum. ASCII table must have the following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' precision. """ header = {} dat = ascii.read(filename, **kwargs) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit fluxes = dat.columns[1].data.astype(np.float64) * flux_unit return header, wavelengths, fluxes
python
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs): """Read ASCII spectrum. ASCII table must have the following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' precision. """ header = {} dat = ascii.read(filename, **kwargs) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit fluxes = dat.columns[1].data.astype(np.float64) * flux_unit return header, wavelengths, fluxes
[ "def", "read_ascii_spec", "(", "filename", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ",", "*", "*", "kwargs", ")", ":", "header", "=", "{", "}", "dat", "=", "ascii", ".", "read", "(", "filename", ",", "*", "*", "kwargs", ")", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", "wavelengths", "=", "dat", ".", "columns", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "float64", ")", "*", "wave_unit", "fluxes", "=", "dat", ".", "columns", "[", "1", "]", ".", "data", ".", "astype", "(", "np", ".", "float64", ")", "*", "flux_unit", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read ASCII spectrum. ASCII table must have the following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' precision.
[ "Read", "ASCII", "spectrum", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L100-L144
train
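A usage sketch with a placeholder two-column table; the unit keywords accept the same strings that validate_unit (documented further below) understands:

from synphot import specio

header, wavelengths, fluxes = specio.read_ascii_spec(
    'demo_spec.txt', wave_unit='micron', flux_unit='Jy')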
spacetelescope/synphot_refactor
synphot/specio.py
read_fits_spec
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ fs = fits.open(filename) header = dict(fs[str('PRIMARY')].header) wave_dat = fs[ext].data.field(wave_col).copy() flux_dat = fs[ext].data.field(flux_col).copy() fits_wave_unit = fs[ext].header.get('TUNIT1') fits_flux_unit = fs[ext].header.get('TUNIT2') if fits_wave_unit is not None: try: wave_unit = units.validate_unit(fits_wave_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid wavelength unit, using ' '{1}: {2}'.format(fits_wave_unit, wave_unit, e), AstropyUserWarning) if fits_flux_unit is not None: try: flux_unit = units.validate_unit(fits_flux_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid flux unit, using ' '{1}: {2}'.format(fits_flux_unit, flux_unit, e), AstropyUserWarning) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = wave_dat * wave_unit fluxes = flux_dat * flux_unit if isinstance(filename, str): fs.close() return header, wavelengths, fluxes
python
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ fs = fits.open(filename) header = dict(fs[str('PRIMARY')].header) wave_dat = fs[ext].data.field(wave_col).copy() flux_dat = fs[ext].data.field(flux_col).copy() fits_wave_unit = fs[ext].header.get('TUNIT1') fits_flux_unit = fs[ext].header.get('TUNIT2') if fits_wave_unit is not None: try: wave_unit = units.validate_unit(fits_wave_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid wavelength unit, using ' '{1}: {2}'.format(fits_wave_unit, wave_unit, e), AstropyUserWarning) if fits_flux_unit is not None: try: flux_unit = units.validate_unit(fits_flux_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid flux unit, using ' '{1}: {2}'.format(fits_flux_unit, flux_unit, e), AstropyUserWarning) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = wave_dat * wave_unit fluxes = flux_dat * flux_unit if isinstance(filename, str): fs.close() return header, wavelengths, fluxes
[ "def", "read_fits_spec", "(", "filename", ",", "ext", "=", "1", ",", "wave_col", "=", "'WAVELENGTH'", ",", "flux_col", "=", "'FLUX'", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ")", ":", "fs", "=", "fits", ".", "open", "(", "filename", ")", "header", "=", "dict", "(", "fs", "[", "str", "(", "'PRIMARY'", ")", "]", ".", "header", ")", "wave_dat", "=", "fs", "[", "ext", "]", ".", "data", ".", "field", "(", "wave_col", ")", ".", "copy", "(", ")", "flux_dat", "=", "fs", "[", "ext", "]", ".", "data", ".", "field", "(", "flux_col", ")", ".", "copy", "(", ")", "fits_wave_unit", "=", "fs", "[", "ext", "]", ".", "header", ".", "get", "(", "'TUNIT1'", ")", "fits_flux_unit", "=", "fs", "[", "ext", "]", ".", "header", ".", "get", "(", "'TUNIT2'", ")", "if", "fits_wave_unit", "is", "not", "None", ":", "try", ":", "wave_unit", "=", "units", ".", "validate_unit", "(", "fits_wave_unit", ")", "except", "(", "exceptions", ".", "SynphotError", ",", "ValueError", ")", "as", "e", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'{0} from FITS header is not valid wavelength unit, using '", "'{1}: {2}'", ".", "format", "(", "fits_wave_unit", ",", "wave_unit", ",", "e", ")", ",", "AstropyUserWarning", ")", "if", "fits_flux_unit", "is", "not", "None", ":", "try", ":", "flux_unit", "=", "units", ".", "validate_unit", "(", "fits_flux_unit", ")", "except", "(", "exceptions", ".", "SynphotError", ",", "ValueError", ")", "as", "e", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'{0} from FITS header is not valid flux unit, using '", "'{1}: {2}'", ".", "format", "(", "fits_flux_unit", ",", "flux_unit", ",", "e", ")", ",", "AstropyUserWarning", ")", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", "wavelengths", "=", "wave_dat", "*", "wave_unit", "fluxes", "=", "flux_dat", "*", "flux_unit", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "fs", ".", "close", "(", ")", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum.
[ "Read", "FITS", "spectrum", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L147-L215
train
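A usage sketch with a placeholder file; when TUNIT1/TUNIT2 are present in the table header they take precedence over the wave_unit/flux_unit keywords:

from synphot import specio

header, wavelengths, fluxes = specio.read_fits_spec(
    'demo_spec.fits', ext=1, wave_col='WAVELENGTH', flux_col='FLUX')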
spacetelescope/synphot_refactor
synphot/specio.py
write_fits_spec
def write_fits_spec(filename, wavelengths, fluxes, pri_header={}, ext_header={}, overwrite=False, trim_zero=True, pad_zero_ends=True, precision=None, epsilon=0.00032, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have different shapes or value precision is not supported. """ if isinstance(wavelengths, u.Quantity): wave_unit = wavelengths.unit wave_value = wavelengths.value else: wave_value = wavelengths if isinstance(fluxes, u.Quantity): flux_unit = fluxes.unit flux_value = fluxes.value else: flux_value = fluxes wave_unit = units.validate_unit(wave_unit).to_string().upper() flux_unit = units.validate_unit(flux_unit).to_string().upper() if wave_value.shape != flux_value.shape: raise exceptions.SynphotError( 'Wavelengths have shape {0} but fluxes have shape {1}'.format( wave_value.shape, flux_value.shape)) # Remove rows with zero flux. Putting this before precision logic to avoid # keeping duplicate wavelengths with zero flux. 
if trim_zero: idx = np.where(flux_value != 0) n_thrown = flux_value.size - len(idx[0]) wave_value = wave_value[idx] flux_value = flux_value[idx] if n_thrown != 0: log.info('{0} zero-flux rows are thrown out'.format(n_thrown)) # Only these Numpy types are supported # 'f' np.float32 # 'd' np.float64 pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion # Use native flux precision if precision is None: precision = flux_value.dtype.char if precision not in pcodes: raise exceptions.SynphotError('flux is not float32 or float64') # Use user specified precision else: precision = precision.lower() if precision == 'single': precision = 'f' elif precision == 'double': precision = 'd' else: raise exceptions.SynphotError( 'precision must be single or double') # Now check wavelength precision wave_precision = wave_value.dtype.char if wave_precision not in pcodes: raise exceptions.SynphotError( 'wavelength is not float32 or float64') # If wavelength is double-precision but data is written out as # single-precision, wavelength values have to be recalculated # so that they will still be sorted with no duplicates. if wave_precision == 'd' and precision == 'f': orig_size = wave_value.size idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon) wave_value = np.append(wave_value[idx], wave_value[-1]) flux_value = np.append(flux_value[idx], flux_value[-1]) n_thrown = orig_size - wave_value.size if n_thrown != 0: warnings.warn( '{0} rows are thrown out in converting wavelengths from ' 'double- to single-precision'.format(n_thrown), AstropyUserWarning) # Keep one zero at each end if pad_zero_ends: w1 = wave_value[0] ** 2 / wave_value[1] w2 = wave_value[-1] ** 2 / wave_value[-2] wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2]) flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0]) # Construct the columns cw = fits.Column(name=wave_col, array=wave_value, unit=wave_unit, format=pcodes[precision]) cf = fits.Column(name=flux_col, array=flux_value, unit=flux_unit, format=pcodes[precision]) # These are written to the primary header: # 1. Filename # 2. Origin # 3. User dictionary (can overwrite defaults) hdr_hdu = fits.PrimaryHDU() hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file') hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__)) for key, val in pri_header.items(): hdr_hdu.header[key] = val # Make the extension HDU and include user dictionary in extension header. tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf])) for key, val in ext_header.items(): tab_hdu.header[key] = val # Write to file hdulist = fits.HDUList([hdr_hdu]) hdulist.append(tab_hdu) hdulist.writeto(filename, overwrite=overwrite)
python
def write_fits_spec(filename, wavelengths, fluxes, pri_header={}, ext_header={}, overwrite=False, trim_zero=True, pad_zero_ends=True, precision=None, epsilon=0.00032, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have different shapes or value precision is not supported. """ if isinstance(wavelengths, u.Quantity): wave_unit = wavelengths.unit wave_value = wavelengths.value else: wave_value = wavelengths if isinstance(fluxes, u.Quantity): flux_unit = fluxes.unit flux_value = fluxes.value else: flux_value = fluxes wave_unit = units.validate_unit(wave_unit).to_string().upper() flux_unit = units.validate_unit(flux_unit).to_string().upper() if wave_value.shape != flux_value.shape: raise exceptions.SynphotError( 'Wavelengths have shape {0} but fluxes have shape {1}'.format( wave_value.shape, flux_value.shape)) # Remove rows with zero flux. Putting this before precision logic to avoid # keeping duplicate wavelengths with zero flux. 
if trim_zero: idx = np.where(flux_value != 0) n_thrown = flux_value.size - len(idx[0]) wave_value = wave_value[idx] flux_value = flux_value[idx] if n_thrown != 0: log.info('{0} zero-flux rows are thrown out'.format(n_thrown)) # Only these Numpy types are supported # 'f' np.float32 # 'd' np.float64 pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion # Use native flux precision if precision is None: precision = flux_value.dtype.char if precision not in pcodes: raise exceptions.SynphotError('flux is not float32 or float64') # Use user specified precision else: precision = precision.lower() if precision == 'single': precision = 'f' elif precision == 'double': precision = 'd' else: raise exceptions.SynphotError( 'precision must be single or double') # Now check wavelength precision wave_precision = wave_value.dtype.char if wave_precision not in pcodes: raise exceptions.SynphotError( 'wavelength is not float32 or float64') # If wavelength is double-precision but data is written out as # single-precision, wavelength values have to be recalculated # so that they will still be sorted with no duplicates. if wave_precision == 'd' and precision == 'f': orig_size = wave_value.size idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon) wave_value = np.append(wave_value[idx], wave_value[-1]) flux_value = np.append(flux_value[idx], flux_value[-1]) n_thrown = orig_size - wave_value.size if n_thrown != 0: warnings.warn( '{0} rows are thrown out in converting wavelengths from ' 'double- to single-precision'.format(n_thrown), AstropyUserWarning) # Keep one zero at each end if pad_zero_ends: w1 = wave_value[0] ** 2 / wave_value[1] w2 = wave_value[-1] ** 2 / wave_value[-2] wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2]) flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0]) # Construct the columns cw = fits.Column(name=wave_col, array=wave_value, unit=wave_unit, format=pcodes[precision]) cf = fits.Column(name=flux_col, array=flux_value, unit=flux_unit, format=pcodes[precision]) # These are written to the primary header: # 1. Filename # 2. Origin # 3. User dictionary (can overwrite defaults) hdr_hdu = fits.PrimaryHDU() hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file') hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__)) for key, val in pri_header.items(): hdr_hdu.header[key] = val # Make the extension HDU and include user dictionary in extension header. tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf])) for key, val in ext_header.items(): tab_hdu.header[key] = val # Write to file hdulist = fits.HDUList([hdr_hdu]) hdulist.append(tab_hdu) hdulist.writeto(filename, overwrite=overwrite)
[ "def", "write_fits_spec", "(", "filename", ",", "wavelengths", ",", "fluxes", ",", "pri_header", "=", "{", "}", ",", "ext_header", "=", "{", "}", ",", "overwrite", "=", "False", ",", "trim_zero", "=", "True", ",", "pad_zero_ends", "=", "True", ",", "precision", "=", "None", ",", "epsilon", "=", "0.00032", ",", "wave_col", "=", "'WAVELENGTH'", ",", "flux_col", "=", "'FLUX'", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ")", ":", "if", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "wave_unit", "=", "wavelengths", ".", "unit", "wave_value", "=", "wavelengths", ".", "value", "else", ":", "wave_value", "=", "wavelengths", "if", "isinstance", "(", "fluxes", ",", "u", ".", "Quantity", ")", ":", "flux_unit", "=", "fluxes", ".", "unit", "flux_value", "=", "fluxes", ".", "value", "else", ":", "flux_value", "=", "fluxes", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", ".", "to_string", "(", ")", ".", "upper", "(", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", ".", "to_string", "(", ")", ".", "upper", "(", ")", "if", "wave_value", ".", "shape", "!=", "flux_value", ".", "shape", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Wavelengths have shape {0} but fluxes have shape {1}'", ".", "format", "(", "wave_value", ".", "shape", ",", "flux_value", ".", "shape", ")", ")", "# Remove rows with zero flux. Putting this before precision logic to avoid", "# keeping duplicate wavelengths with zero flux.", "if", "trim_zero", ":", "idx", "=", "np", ".", "where", "(", "flux_value", "!=", "0", ")", "wave_value", "=", "wave_value", "[", "idx", "]", "flux_value", "=", "flux_value", "[", "idx", "]", "n_thrown", "=", "wave_value", ".", "size", "-", "len", "(", "idx", "[", "0", "]", ")", "if", "n_thrown", "!=", "0", ":", "log", ".", "info", "(", "'{0} zero-flux rows are thrown out'", ".", "format", "(", "n_thrown", ")", ")", "# Only these Numpy types are supported", "# 'f' np.float32", "# 'd' np.float64", "pcodes", "=", "{", "'d'", ":", "'D'", ",", "'f'", ":", "'E'", "}", "# Numpy to FITS conversion", "# Use native flux precision", "if", "precision", "is", "None", ":", "precision", "=", "flux_value", ".", "dtype", ".", "char", "if", "precision", "not", "in", "pcodes", ":", "raise", "exceptions", ".", "SynphotError", "(", "'flux is not float32 or float64'", ")", "# Use user specified precision", "else", ":", "precision", "=", "precision", ".", "lower", "(", ")", "if", "precision", "==", "'single'", ":", "precision", "=", "'f'", "elif", "precision", "==", "'double'", ":", "precision", "=", "'d'", "else", ":", "raise", "exceptions", ".", "SynphotError", "(", "'precision must be single or double'", ")", "# Now check wavelength precision", "wave_precision", "=", "wave_value", ".", "dtype", ".", "char", "if", "wave_precision", "not", "in", "pcodes", ":", "raise", "exceptions", ".", "SynphotError", "(", "'wavelength is not float32 or float64'", ")", "# If wavelength is double-precision but data is written out as", "# single-precision, wavelength values have to be recalculated", "# so that they will still be sorted with no duplicates.", "if", "wave_precision", "==", "'d'", "and", "precision", "==", "'f'", ":", "orig_size", "=", "wave_value", ".", "size", "idx", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "wave_value", "[", "1", ":", "]", "-", "wave_value", "[", ":", "-", "1", "]", ")", ">", "epsilon", ")", "wave_value", "=", "np", ".", "append", "(", "wave_value", "[", "idx", "]", ",", 
"wave_value", "[", "-", "1", "]", ")", "flux_value", "=", "np", ".", "append", "(", "flux_value", "[", "idx", "]", ",", "flux_value", "[", "-", "1", "]", ")", "n_thrown", "=", "orig_size", "-", "wave_value", ".", "size", "if", "n_thrown", "!=", "0", ":", "warnings", ".", "warn", "(", "'{0} rows are thrown out in converting wavelengths from '", "'double- to single-precision'", ".", "format", "(", "n_thrown", ")", ",", "AstropyUserWarning", ")", "# Keep one zero at each end", "if", "pad_zero_ends", ":", "w1", "=", "wave_value", "[", "0", "]", "**", "2", "/", "wave_value", "[", "1", "]", "w2", "=", "wave_value", "[", "-", "1", "]", "**", "2", "/", "wave_value", "[", "-", "2", "]", "wave_value", "=", "np", ".", "insert", "(", "wave_value", ",", "[", "0", ",", "wave_value", ".", "size", "]", ",", "[", "w1", ",", "w2", "]", ")", "flux_value", "=", "np", ".", "insert", "(", "flux_value", ",", "[", "0", ",", "flux_value", ".", "size", "]", ",", "[", "0.0", ",", "0.0", "]", ")", "# Construct the columns", "cw", "=", "fits", ".", "Column", "(", "name", "=", "wave_col", ",", "array", "=", "wave_value", ",", "unit", "=", "wave_unit", ",", "format", "=", "pcodes", "[", "precision", "]", ")", "cf", "=", "fits", ".", "Column", "(", "name", "=", "flux_col", ",", "array", "=", "flux_value", ",", "unit", "=", "flux_unit", ",", "format", "=", "pcodes", "[", "precision", "]", ")", "# These are written to the primary header:", "# 1. Filename", "# 2. Origin", "# 3. User dictionary (can overwrite defaults)", "hdr_hdu", "=", "fits", ".", "PrimaryHDU", "(", ")", "hdr_hdu", ".", "header", "[", "'filename'", "]", "=", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "'name of file'", ")", "hdr_hdu", ".", "header", "[", "'origin'", "]", "=", "(", "'synphot'", ",", "'Version {0}'", ".", "format", "(", "__version__", ")", ")", "for", "key", ",", "val", "in", "pri_header", ".", "items", "(", ")", ":", "hdr_hdu", ".", "header", "[", "key", "]", "=", "val", "# Make the extension HDU and include user dictionary in extension header.", "tab_hdu", "=", "fits", ".", "BinTableHDU", ".", "from_columns", "(", "fits", ".", "ColDefs", "(", "[", "cw", ",", "cf", "]", ")", ")", "for", "key", ",", "val", "in", "ext_header", ".", "items", "(", ")", ":", "tab_hdu", ".", "header", "[", "key", "]", "=", "val", "# Write to file", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "hdr_hdu", "]", ")", "hdulist", ".", "append", "(", "tab_hdu", ")", "hdulist", ".", "writeto", "(", "filename", ",", "overwrite", "=", "overwrite", ")" ]
Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have different shapes or value precision is not supported.
[ "Write", "FITS", "spectrum", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L218-L384
train
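A round-trip sketch writing a small synthetic spectrum to a placeholder file; with the defaults, zero-flux rows are trimmed first and one zero-flux row is then padded onto each end:

import numpy as np
from astropy import units as u
from synphot import specio, units

wave = np.arange(1000.0, 1100.0, 10.0) * u.AA
flux = np.full(wave.size, 1e-14) * units.FLAM
specio.write_fits_spec('demo_spec.fits', wave, flux, overwrite=True)
header, wave2, flux2 = specio.read_fits_spec('demo_spec.fits')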
spacetelescope/synphot_refactor
synphot/units.py
spectral_density_vega
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
python
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
[ "def", "spectral_density_vega", "(", "wav", ",", "vegaflux", ")", ":", "vega_photlam", "=", "vegaflux", ".", "to", "(", "PHOTLAM", ",", "equivalencies", "=", "u", ".", "spectral_density", "(", "wav", ")", ")", ".", "value", "def", "converter", "(", "x", ")", ":", "\"\"\"Set nan/inf to -99 mag.\"\"\"", "val", "=", "-", "2.5", "*", "np", ".", "log10", "(", "x", "/", "vega_photlam", ")", "result", "=", "np", ".", "zeros", "(", "val", ".", "shape", ",", "dtype", "=", "np", ".", "float64", ")", "-", "99", "mask", "=", "np", ".", "isfinite", "(", "val", ")", "if", "result", ".", "ndim", ">", "0", ":", "result", "[", "mask", "]", "=", "val", "[", "mask", "]", "elif", "mask", ":", "result", "=", "np", ".", "asarray", "(", "val", ")", "return", "result", "def", "iconverter", "(", "x", ")", ":", "return", "vega_photlam", "*", "10", "**", "(", "-", "0.4", "*", "x", ")", "return", "[", "(", "PHOTLAM", ",", "VEGAMAG", ",", "converter", ",", "iconverter", ")", "]" ]
Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies.
[ "Flux", "equivalencies", "between", "PHOTLAM", "and", "VEGAMAG", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L64-L99
train
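A conversion sketch; the Vega fluxes below are made-up numbers standing in for a real Vega spectrum sampled at the same wavelengths:

import numpy as np
from astropy import units as u
from synphot import units

wave = np.array([4000.0, 5000.0]) * u.AA
vegaflux = np.array([6.3e-2, 3.6e-2]) * units.PHOTLAM  # made-up Vega values
flux = np.array([1e-3, 1e-3]) * units.PHOTLAM
vegamag = flux.to(units.VEGAMAG,
                  equivalencies=units.spectral_density_vega(wave, vegaflux))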
spacetelescope/synphot_refactor
synphot/units.py
spectral_density_count
def spectral_density_count(wav, area): """Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. """ from .binning import calculate_bin_widths, calculate_bin_edges wav = wav.to(u.AA, equivalencies=u.spectral()) area = area.to(AREA) bin_widths = calculate_bin_widths(calculate_bin_edges(wav)) factor = bin_widths.value * area.value def converter_count(x): return x * factor def iconverter_count(x): return x / factor def converter_obmag(x): return -2.5 * np.log10(x * factor) def iconverter_obmag(x): return 10**(-0.4 * x) / factor return [(PHOTLAM, u.count, converter_count, iconverter_count), (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)]
python
def spectral_density_count(wav, area): """Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. """ from .binning import calculate_bin_widths, calculate_bin_edges wav = wav.to(u.AA, equivalencies=u.spectral()) area = area.to(AREA) bin_widths = calculate_bin_widths(calculate_bin_edges(wav)) factor = bin_widths.value * area.value def converter_count(x): return x * factor def iconverter_count(x): return x / factor def converter_obmag(x): return -2.5 * np.log10(x * factor) def iconverter_obmag(x): return 10**(-0.4 * x) / factor return [(PHOTLAM, u.count, converter_count, iconverter_count), (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)]
[ "def", "spectral_density_count", "(", "wav", ",", "area", ")", ":", "from", ".", "binning", "import", "calculate_bin_widths", ",", "calculate_bin_edges", "wav", "=", "wav", ".", "to", "(", "u", ".", "AA", ",", "equivalencies", "=", "u", ".", "spectral", "(", ")", ")", "area", "=", "area", ".", "to", "(", "AREA", ")", "bin_widths", "=", "calculate_bin_widths", "(", "calculate_bin_edges", "(", "wav", ")", ")", "factor", "=", "bin_widths", ".", "value", "*", "area", ".", "value", "def", "converter_count", "(", "x", ")", ":", "return", "x", "*", "factor", "def", "iconverter_count", "(", "x", ")", ":", "return", "x", "/", "factor", "def", "converter_obmag", "(", "x", ")", ":", "return", "-", "2.5", "*", "np", ".", "log10", "(", "x", "*", "factor", ")", "def", "iconverter_obmag", "(", "x", ")", ":", "return", "10", "**", "(", "-", "0.4", "*", "x", ")", "/", "factor", "return", "[", "(", "PHOTLAM", ",", "u", ".", "count", ",", "converter_count", ",", "iconverter_count", ")", ",", "(", "PHOTLAM", ",", "OBMAG", ",", "converter_obmag", ",", "iconverter_obmag", ")", "]" ]
Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies.
[ "Flux", "equivalencies", "between", "PHOTLAM", "and", "count", "/", "OBMAG", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L102-L140
train
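A conversion sketch; the collecting area below is only an example value, assumed (per the docstring) to be in cm^2 via the module's AREA unit:

import numpy as np
from astropy import units as u
from synphot import units

wave = np.array([5000.0, 5010.0, 5020.0]) * u.AA
area = 45238.93 * units.AREA  # example telescope collecting area, cm^2
flux = np.ones(wave.size) * units.PHOTLAM
counts = flux.to(u.count,
                 equivalencies=units.spectral_density_count(wave, area))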
spacetelescope/synphot_refactor
synphot/units.py
convert_flux
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must be a Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
python
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must be a Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
[ "def", "convert_flux", "(", "wavelengths", ",", "fluxes", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "fluxes", ",", "u", ".", "Quantity", ")", ":", "fluxes", "=", "fluxes", "*", "PHOTLAM", "out_flux_unit", "=", "validate_unit", "(", "out_flux_unit", ")", "out_flux_unit_name", "=", "out_flux_unit", ".", "to_string", "(", ")", "in_flux_unit_name", "=", "fluxes", ".", "unit", ".", "to_string", "(", ")", "# No conversion necessary", "if", "in_flux_unit_name", "==", "out_flux_unit_name", ":", "return", "fluxes", "in_flux_type", "=", "fluxes", ".", "unit", ".", "physical_type", "out_flux_type", "=", "out_flux_unit", ".", "physical_type", "# Wavelengths must Quantity", "if", "not", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "wavelengths", "=", "wavelengths", "*", "u", ".", "AA", "eqv", "=", "u", ".", "spectral_density", "(", "wavelengths", ")", "# Use built-in astropy equivalencies", "try", ":", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "# Use PHOTLAM as in-between unit", "except", "u", ".", "UnitConversionError", ":", "# Convert input unit to PHOTLAM", "if", "fluxes", ".", "unit", "==", "PHOTLAM", ":", "flux_photlam", "=", "fluxes", "elif", "in_flux_type", "!=", "'unknown'", ":", "flux_photlam", "=", "fluxes", ".", "to", "(", "PHOTLAM", ",", "eqv", ")", "else", ":", "flux_photlam", "=", "_convert_flux", "(", "wavelengths", ",", "fluxes", ",", "PHOTLAM", ",", "*", "*", "kwargs", ")", "# Convert PHOTLAM to output unit", "if", "out_flux_unit", "==", "PHOTLAM", ":", "out_flux", "=", "flux_photlam", "elif", "out_flux_type", "!=", "'unknown'", ":", "out_flux", "=", "flux_photlam", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "else", ":", "out_flux", "=", "_convert_flux", "(", "wavelengths", ",", "flux_photlam", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", "return", "out_flux" ]
Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed.
[ "Perform", "conversion", "for", ":", "ref", ":", "supported", "flux", "units", "<synphot", "-", "flux", "-", "units", ">", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L143-L225
train
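A high-level usage sketch: plain arrays are interpreted as Angstrom and PHOTLAM per the docstring, and the built-in astropy equivalencies cover the PHOTLAM-to-FLAM step:

import numpy as np
from synphot import units

wave = np.array([4000.0, 5000.0, 6000.0])  # interpreted as Angstrom
flux = np.array([1e-4, 2e-4, 3e-4])        # interpreted as PHOTLAM
flux_flam = units.convert_flux(wave, flux, units.FLAM)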
spacetelescope/synphot_refactor
synphot/units.py
_convert_flux
def _convert_flux(wavelengths, fluxes, out_flux_unit, area=None, vegaspec=None): """Flux conversion for PHOTLAM <-> X.""" flux_unit_names = (fluxes.unit.to_string(), out_flux_unit.to_string()) if PHOTLAM.to_string() not in flux_unit_names: raise exceptions.SynphotError( 'PHOTLAM must be one of the conversion units but got ' '{0}.'.format(flux_unit_names)) # VEGAMAG if VEGAMAG.to_string() in flux_unit_names: from .spectrum import SourceSpectrum if not isinstance(vegaspec, SourceSpectrum): raise exceptions.SynphotError('Vega spectrum is missing.') flux_vega = vegaspec(wavelengths) out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_vega(wavelengths, flux_vega)) # OBMAG or count elif (u.count in (fluxes.unit, out_flux_unit) or OBMAG.to_string() in flux_unit_names): if area is None: raise exceptions.SynphotError( 'Area is compulsory for conversion involving count or OBMAG.') elif not isinstance(area, u.Quantity): area = area * AREA out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_count(wavelengths, area)) else: raise u.UnitsError('{0} and {1} are not convertible'.format( fluxes.unit, out_flux_unit)) return out_flux
python
def _convert_flux(wavelengths, fluxes, out_flux_unit, area=None, vegaspec=None): """Flux conversion for PHOTLAM <-> X.""" flux_unit_names = (fluxes.unit.to_string(), out_flux_unit.to_string()) if PHOTLAM.to_string() not in flux_unit_names: raise exceptions.SynphotError( 'PHOTLAM must be one of the conversion units but got ' '{0}.'.format(flux_unit_names)) # VEGAMAG if VEGAMAG.to_string() in flux_unit_names: from .spectrum import SourceSpectrum if not isinstance(vegaspec, SourceSpectrum): raise exceptions.SynphotError('Vega spectrum is missing.') flux_vega = vegaspec(wavelengths) out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_vega(wavelengths, flux_vega)) # OBMAG or count elif (u.count in (fluxes.unit, out_flux_unit) or OBMAG.to_string() in flux_unit_names): if area is None: raise exceptions.SynphotError( 'Area is compulsory for conversion involving count or OBMAG.') elif not isinstance(area, u.Quantity): area = area * AREA out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_count(wavelengths, area)) else: raise u.UnitsError('{0} and {1} are not convertible'.format( fluxes.unit, out_flux_unit)) return out_flux
[ "def", "_convert_flux", "(", "wavelengths", ",", "fluxes", ",", "out_flux_unit", ",", "area", "=", "None", ",", "vegaspec", "=", "None", ")", ":", "flux_unit_names", "=", "(", "fluxes", ".", "unit", ".", "to_string", "(", ")", ",", "out_flux_unit", ".", "to_string", "(", ")", ")", "if", "PHOTLAM", ".", "to_string", "(", ")", "not", "in", "flux_unit_names", ":", "raise", "exceptions", ".", "SynphotError", "(", "'PHOTLAM must be one of the conversion units but get '", "'{0}.'", ".", "format", "(", "flux_unit_names", ")", ")", "# VEGAMAG", "if", "VEGAMAG", ".", "to_string", "(", ")", "in", "flux_unit_names", ":", "from", ".", "spectrum", "import", "SourceSpectrum", "if", "not", "isinstance", "(", "vegaspec", ",", "SourceSpectrum", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Vega spectrum is missing.'", ")", "flux_vega", "=", "vegaspec", "(", "wavelengths", ")", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "equivalencies", "=", "spectral_density_vega", "(", "wavelengths", ",", "flux_vega", ")", ")", "# OBMAG or count", "elif", "(", "u", ".", "count", "in", "(", "fluxes", ".", "unit", ",", "out_flux_unit", ")", "or", "OBMAG", ".", "to_string", "(", ")", "in", "flux_unit_names", ")", ":", "if", "area", "is", "None", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Area is compulsory for conversion involving count or OBMAG.'", ")", "elif", "not", "isinstance", "(", "area", ",", "u", ".", "Quantity", ")", ":", "area", "=", "area", "*", "AREA", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "equivalencies", "=", "spectral_density_count", "(", "wavelengths", ",", "area", ")", ")", "else", ":", "raise", "u", ".", "UnitsError", "(", "'{0} and {1} are not convertible'", ".", "format", "(", "fluxes", ".", "unit", ",", "out_flux_unit", ")", ")", "return", "out_flux" ]
Flux conversion for PHOTLAM <-> X.
[ "Flux", "conversion", "for", "PHOTLAM", "<", "-", ">", "X", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L228-L268
train
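This private helper is reached through convert_flux; a sketch of the count/OBMAG path it guards, where the area value is again only an example number in cm^2:

import numpy as np
from synphot import units

wave = np.array([5000.0, 5010.0, 5020.0])  # interpreted as Angstrom
flux = np.ones(wave.size)                  # interpreted as PHOTLAM
obmag = units.convert_flux(wave, flux, units.OBMAG, area=45238.93)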
spacetelescope/synphot_refactor
synphot/units.py
validate_unit
def validate_unit(input_unit): """Validate unit. To be compatible with existing SYNPHOT data files: * 'angstroms' and 'inversemicrons' are accepted although unrecognized by astropy units * 'transmission', 'extinction', and 'emissivity' are converted to astropy dimensionless unit Parameters ---------- input_unit : str or `~astropy.units.core.Unit` Unit to validate. Returns ------- output_unit : `~astropy.units.core.Unit` Validated unit. Raises ------ synphot.exceptions.SynphotError Invalid unit. """ if isinstance(input_unit, str): input_unit_lowcase = input_unit.lower() # Backward-compatibility if input_unit_lowcase == 'angstroms': output_unit = u.AA elif input_unit_lowcase == 'inversemicrons': output_unit = u.micron ** -1 elif input_unit_lowcase in ('transmission', 'extinction', 'emissivity'): output_unit = THROUGHPUT elif input_unit_lowcase == 'jy': output_unit = u.Jy # Work around mag unit limitations elif input_unit_lowcase in ('stmag', 'mag(st)'): output_unit = u.STmag elif input_unit_lowcase in ('abmag', 'mag(ab)'): output_unit = u.ABmag else: try: # astropy.units is case-sensitive output_unit = u.Unit(input_unit) except ValueError: # synphot is case-insensitive output_unit = u.Unit(input_unit_lowcase) elif isinstance(input_unit, (u.UnitBase, u.LogUnit)): output_unit = input_unit else: raise exceptions.SynphotError( '{0} must be a recognized string or ' 'astropy.units.core.Unit'.format(input_unit)) return output_unit
python
def validate_unit(input_unit): """Validate unit. To be compatible with existing SYNPHOT data files: * 'angstroms' and 'inversemicrons' are accepted although unrecognized by astropy units * 'transmission', 'extinction', and 'emissivity' are converted to astropy dimensionless unit Parameters ---------- input_unit : str or `~astropy.units.core.Unit` Unit to validate. Returns ------- output_unit : `~astropy.units.core.Unit` Validated unit. Raises ------ synphot.exceptions.SynphotError Invalid unit. """ if isinstance(input_unit, str): input_unit_lowcase = input_unit.lower() # Backward-compatibility if input_unit_lowcase == 'angstroms': output_unit = u.AA elif input_unit_lowcase == 'inversemicrons': output_unit = u.micron ** -1 elif input_unit_lowcase in ('transmission', 'extinction', 'emissivity'): output_unit = THROUGHPUT elif input_unit_lowcase == 'jy': output_unit = u.Jy # Work around mag unit limitations elif input_unit_lowcase in ('stmag', 'mag(st)'): output_unit = u.STmag elif input_unit_lowcase in ('abmag', 'mag(ab)'): output_unit = u.ABmag else: try: # astropy.units is case-sensitive output_unit = u.Unit(input_unit) except ValueError: # synphot is case-insensitive output_unit = u.Unit(input_unit_lowcase) elif isinstance(input_unit, (u.UnitBase, u.LogUnit)): output_unit = input_unit else: raise exceptions.SynphotError( '{0} must be a recognized string or ' 'astropy.units.core.Unit'.format(input_unit)) return output_unit
[ "def", "validate_unit", "(", "input_unit", ")", ":", "if", "isinstance", "(", "input_unit", ",", "str", ")", ":", "input_unit_lowcase", "=", "input_unit", ".", "lower", "(", ")", "# Backward-compatibility", "if", "input_unit_lowcase", "==", "'angstroms'", ":", "output_unit", "=", "u", ".", "AA", "elif", "input_unit_lowcase", "==", "'inversemicrons'", ":", "output_unit", "=", "u", ".", "micron", "**", "-", "1", "elif", "input_unit_lowcase", "in", "(", "'transmission'", ",", "'extinction'", ",", "'emissivity'", ")", ":", "output_unit", "=", "THROUGHPUT", "elif", "input_unit_lowcase", "==", "'jy'", ":", "output_unit", "=", "u", ".", "Jy", "# Work around mag unit limitations", "elif", "input_unit_lowcase", "in", "(", "'stmag'", ",", "'mag(st)'", ")", ":", "output_unit", "=", "u", ".", "STmag", "elif", "input_unit_lowcase", "in", "(", "'abmag'", ",", "'mag(ab)'", ")", ":", "output_unit", "=", "u", ".", "ABmag", "else", ":", "try", ":", "# astropy.units is case-sensitive", "output_unit", "=", "u", ".", "Unit", "(", "input_unit", ")", "except", "ValueError", ":", "# synphot is case-insensitive", "output_unit", "=", "u", ".", "Unit", "(", "input_unit_lowcase", ")", "elif", "isinstance", "(", "input_unit", ",", "(", "u", ".", "UnitBase", ",", "u", ".", "LogUnit", ")", ")", ":", "output_unit", "=", "input_unit", "else", ":", "raise", "exceptions", ".", "SynphotError", "(", "'{0} must be a recognized string or '", "'astropy.units.core.Unit'", ".", "format", "(", "input_unit", ")", ")", "return", "output_unit" ]
Validate unit. To be compatible with existing SYNPHOT data files: * 'angstroms' and 'inversemicrons' are accepted although unrecognized by astropy units * 'transmission', 'extinction', and 'emissivity' are converted to astropy dimensionless unit Parameters ---------- input_unit : str or `~astropy.units.core.Unit` Unit to validate. Returns ------- output_unit : `~astropy.units.core.Unit` Validated unit. Raises ------ synphot.exceptions.SynphotError Invalid unit.
[ "Validate", "unit", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L275-L335
train
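A short usage sketch for validate_unit, assuming synphot is installed; the return values noted in the comments follow directly from the branches above.
from synphot.units import validate_unit

aa = validate_unit('angstroms')       # legacy spelling -> astropy Angstrom
thru = validate_unit('transmission')  # legacy name -> dimensionless THROUGHPUT
ab = validate_unit('abmag')           # workaround spelling -> u.ABmag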
spacetelescope/synphot_refactor
synphot/units.py
validate_wave_unit
def validate_wave_unit(wave_unit): """Like :func:`validate_unit` but specific to wavelength.""" output_unit = validate_unit(wave_unit) unit_type = output_unit.physical_type if unit_type not in ('length', 'wavenumber', 'frequency'): raise exceptions.SynphotError( 'wavelength physical type is not length, wave number, or ' 'frequency: {0}'.format(unit_type)) return output_unit
python
def validate_wave_unit(wave_unit): """Like :func:`validate_unit` but specific to wavelength.""" output_unit = validate_unit(wave_unit) unit_type = output_unit.physical_type if unit_type not in ('length', 'wavenumber', 'frequency'): raise exceptions.SynphotError( 'wavelength physical type is not length, wave number, or ' 'frequency: {0}'.format(unit_type)) return output_unit
[ "def", "validate_wave_unit", "(", "wave_unit", ")", ":", "output_unit", "=", "validate_unit", "(", "wave_unit", ")", "unit_type", "=", "output_unit", ".", "physical_type", "if", "unit_type", "not", "in", "(", "'length'", ",", "'wavenumber'", ",", "'frequency'", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'wavelength physical type is not length, wave number, or '", "'frequency: {0}'", ".", "format", "(", "unit_type", ")", ")", "return", "output_unit" ]
Like :func:`validate_unit` but specific to wavelength.
[ "Like", ":", "func", ":", "validate_unit", "but", "specific", "to", "wavelength", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L338-L348
train
spacetelescope/synphot_refactor
synphot/units.py
validate_quantity
def validate_quantity(input_value, output_unit, equivalencies=[]): """Validate quantity (value and unit). .. note:: For flux conversion, use :func:`convert_flux` instead. Parameters ---------- input_value : number, array-like, or `~astropy.units.quantity.Quantity` Quantity to validate. If not a Quantity, assumed to be already in output unit. output_unit : str or `~astropy.units.core.Unit` Output quantity unit. equivalencies : list of equivalence pairs, optional See `astropy.units`. Returns ------- output_value : `~astropy.units.quantity.Quantity` Validated quantity in given unit. """ output_unit = validate_unit(output_unit) if isinstance(input_value, u.Quantity): output_value = input_value.to(output_unit, equivalencies=equivalencies) else: output_value = input_value * output_unit return output_value
python
def validate_quantity(input_value, output_unit, equivalencies=[]): """Validate quantity (value and unit). .. note:: For flux conversion, use :func:`convert_flux` instead. Parameters ---------- input_value : number, array-like, or `~astropy.units.quantity.Quantity` Quantity to validate. If not a Quantity, assumed to be already in output unit. output_unit : str or `~astropy.units.core.Unit` Output quantity unit. equivalencies : list of equivalence pairs, optional See `astropy.units`. Returns ------- output_value : `~astropy.units.quantity.Quantity` Validated quantity in given unit. """ output_unit = validate_unit(output_unit) if isinstance(input_value, u.Quantity): output_value = input_value.to(output_unit, equivalencies=equivalencies) else: output_value = input_value * output_unit return output_value
[ "def", "validate_quantity", "(", "input_value", ",", "output_unit", ",", "equivalencies", "=", "[", "]", ")", ":", "output_unit", "=", "validate_unit", "(", "output_unit", ")", "if", "isinstance", "(", "input_value", ",", "u", ".", "Quantity", ")", ":", "output_value", "=", "input_value", ".", "to", "(", "output_unit", ",", "equivalencies", "=", "equivalencies", ")", "else", ":", "output_value", "=", "input_value", "*", "output_unit", "return", "output_value" ]
Validate quantity (value and unit). .. note:: For flux conversion, use :func:`convert_flux` instead. Parameters ---------- input_value : number, array-like, or `~astropy.units.quantity.Quantity` Quantity to validate. If not a Quantity, assumed to be already in output unit. output_unit : str or `~astropy.units.core.Unit` Output quantity unit. equivalencies : list of equivalence pairs, optional See `astropy.units`. Returns ------- output_value : `~astropy.units.quantity.Quantity` Validated quantity in given unit.
[ "Validate", "quantity", "(", "value", "and", "unit", ")", "." ]
9c064f3cff0c41dd8acadc0f67c6350931275b9f
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L351-L383
train
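Usage sketch for validate_quantity, again assuming synphot is installed: a bare number is taken to already be in the output unit, while a Quantity is converted.
import astropy.units as u
from synphot.units import validate_quantity

print(validate_quantity(5000, u.AA))            # -> 5000 Angstrom (assumed already in AA)
print(validate_quantity(0.5 * u.micron, u.AA))  # -> 5000.0 Angstrom (converted)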
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.add
def add(self, device): """Add device.""" if not isinstance(device, Device): raise TypeError() self.__devices.append(device)
python
def add(self, device): """Add device.""" if not isinstance(device, Device): raise TypeError() self.__devices.append(device)
[ "def", "add", "(", "self", ",", "device", ")", ":", "if", "not", "isinstance", "(", "device", ",", "Device", ")", ":", "raise", "TypeError", "(", ")", "self", ".", "__devices", ".", "append", "(", "device", ")" ]
Add device.
[ "Add", "device", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L36-L40
train
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.data_import
def data_import(self, json_response): """Import data from json response.""" if 'data' not in json_response: raise PyVLXException('no element data found: {0}'.format( json.dumps(json_response))) data = json_response['data'] for item in data: if 'category' not in item: raise PyVLXException('no element category: {0}'.format( json.dumps(item))) category = item['category'] if category == 'Window opener': self.load_window_opener(item) elif category in ['Roller shutter', 'Dual Shutter']: self.load_roller_shutter(item) elif category in ['Blind']: self.load_blind(item) else: self.pyvlx.logger.warning( 'WARNING: Could not parse product: %s', category)
python
def data_import(self, json_response): """Import data from json response.""" if 'data' not in json_response: raise PyVLXException('no element data found: {0}'.format( json.dumps(json_response))) data = json_response['data'] for item in data: if 'category' not in item: raise PyVLXException('no element category: {0}'.format( json.dumps(item))) category = item['category'] if category == 'Window opener': self.load_window_opener(item) elif category in ['Roller shutter', 'Dual Shutter']: self.load_roller_shutter(item) elif category in ['Blind']: self.load_blind(item) else: self.pyvlx.logger.warning( 'WARNING: Could not parse product: %s', category)
[ "def", "data_import", "(", "self", ",", "json_response", ")", ":", "if", "'data'", "not", "in", "json_response", ":", "raise", "PyVLXException", "(", "'no element data found: {0}'", ".", "format", "(", "json", ".", "dumps", "(", "json_response", ")", ")", ")", "data", "=", "json_response", "[", "'data'", "]", "for", "item", "in", "data", ":", "if", "'category'", "not", "in", "item", ":", "raise", "PyVLXException", "(", "'no element category: {0}'", ".", "format", "(", "json", ".", "dumps", "(", "item", ")", ")", ")", "category", "=", "item", "[", "'category'", "]", "if", "category", "==", "'Window opener'", ":", "self", ".", "load_window_opener", "(", "item", ")", "elif", "category", "in", "[", "'Roller shutter'", ",", "'Dual Shutter'", "]", ":", "self", ".", "load_roller_shutter", "(", "item", ")", "elif", "category", "in", "[", "'Blind'", "]", ":", "self", ".", "load_blind", "(", "item", ")", "else", ":", "self", ".", "pyvlx", ".", "logger", ".", "warning", "(", "'WARNING: Could not parse product: %s'", ",", "category", ")" ]
Import data from json response.
[ "Import", "data", "from", "json", "response", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L47-L67
train
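A schematic payload in the shape data_import expects. Only the 'data' list and the 'category' key are read by the method itself; the 'name' field and all values here are invented, and whatever Window/RollerShutter.from_config additionally requires would also have to be present.
json_response = {
    "data": [
        {"category": "Window opener", "name": "Bathroom Window"},
        {"category": "Roller shutter", "name": "Living Room Shutter"},
    ]
}
devices.data_import(json_response)  # 'devices' being an existing Devices instance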
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.load_window_opener
def load_window_opener(self, item): """Load window opener from JSON.""" window = Window.from_config(self.pyvlx, item) self.add(window)
python
def load_window_opener(self, item): """Load window opener from JSON.""" window = Window.from_config(self.pyvlx, item) self.add(window)
[ "def", "load_window_opener", "(", "self", ",", "item", ")", ":", "window", "=", "Window", ".", "from_config", "(", "self", ".", "pyvlx", ",", "item", ")", "self", ".", "add", "(", "window", ")" ]
Load window opener from JSON.
[ "Load", "window", "opener", "from", "JSON", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L69-L72
train
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.load_roller_shutter
def load_roller_shutter(self, item): """Load roller shutter from JSON.""" rollershutter = RollerShutter.from_config(self.pyvlx, item) self.add(rollershutter)
python
def load_roller_shutter(self, item): """Load roller shutter from JSON.""" rollershutter = RollerShutter.from_config(self.pyvlx, item) self.add(rollershutter)
[ "def", "load_roller_shutter", "(", "self", ",", "item", ")", ":", "rollershutter", "=", "RollerShutter", ".", "from_config", "(", "self", ".", "pyvlx", ",", "item", ")", "self", ".", "add", "(", "rollershutter", ")" ]
Load roller shutter from JSON.
[ "Load", "roller", "shutter", "from", "JSON", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L74-L77
train
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.load_blind
def load_blind(self, item): """Load blind from JSON.""" blind = Blind.from_config(self.pyvlx, item) self.add(blind)
python
def load_blind(self, item): """Load blind from JSON.""" blind = Blind.from_config(self.pyvlx, item) self.add(blind)
[ "def", "load_blind", "(", "self", ",", "item", ")", ":", "blind", "=", "Blind", ".", "from_config", "(", "self", ".", "pyvlx", ",", "item", ")", "self", ".", "add", "(", "blind", ")" ]
Load blind from JSON.
[ "Load", "blind", "from", "JSON", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L79-L82
train
cs50/style50
style50/_api.py
get_terminal_size
def get_terminal_size(fallback=(80, 24)): """ Return tuple containing columns and rows of controlling terminal, trying harder than shutil.get_terminal_size to find a tty before returning fallback. Theoretically, stdout, stderr, and stdin could all be different ttys that could cause us to get the wrong measurements (instead of using the fallback) but the much more common case is that IO is piped. """ for stream in [sys.__stdout__, sys.__stderr__, sys.__stdin__]: try: # Make WINSIZE call to terminal data = fcntl.ioctl(stream.fileno(), TIOCGWINSZ, b"\x00\x00\00\x00") except OSError: pass else: # Unpack two shorts from ioctl call lines, columns = struct.unpack("hh", data) break else: columns, lines = fallback return columns, lines
python
def get_terminal_size(fallback=(80, 24)): """ Return tuple containing columns and rows of controlling terminal, trying harder than shutil.get_terminal_size to find a tty before returning fallback. Theoretically, stdout, stderr, and stdin could all be different ttys that could cause us to get the wrong measurements (instead of using the fallback) but the much more common case is that IO is piped. """ for stream in [sys.__stdout__, sys.__stderr__, sys.__stdin__]: try: # Make WINSIZE call to terminal data = fcntl.ioctl(stream.fileno(), TIOCGWINSZ, b"\x00\x00\00\x00") except OSError: pass else: # Unpack two shorts from ioctl call lines, columns = struct.unpack("hh", data) break else: columns, lines = fallback return columns, lines
[ "def", "get_terminal_size", "(", "fallback", "=", "(", "80", ",", "24", ")", ")", ":", "for", "stream", "in", "[", "sys", ".", "__stdout__", ",", "sys", ".", "__stderr__", ",", "sys", ".", "__stdin__", "]", ":", "try", ":", "# Make WINSIZE call to terminal", "data", "=", "fcntl", ".", "ioctl", "(", "stream", ".", "fileno", "(", ")", ",", "TIOCGWINSZ", ",", "b\"\\x00\\x00\\00\\x00\"", ")", "except", "OSError", ":", "pass", "else", ":", "# Unpack two shorts from ioctl call", "lines", ",", "columns", "=", "struct", ".", "unpack", "(", "\"hh\"", ",", "data", ")", "break", "else", ":", "columns", ",", "lines", "=", "fallback", "return", "columns", ",", "lines" ]
Return tuple containing columns and rows of controlling terminal, trying harder than shutil.get_terminal_size to find a tty before returning fallback. Theoretically, stdout, stderr, and stdin could all be different ttys that could cause us to get the wrong measurements (instead of using the fallback) but the much more common case is that IO is piped.
[ "Return", "tuple", "containing", "columns", "and", "rows", "of", "controlling", "terminal", "trying", "harder", "than", "shutil", ".", "get_terminal_size", "to", "find", "a", "tty", "before", "returning", "fallback", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L23-L45
train
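Usage sketch, assuming a POSIX system where at least one standard stream is a tty. The point of probing stdout, stderr and stdin in turn is to survive cases like `python script.py | cat`, where shutil.get_terminal_size would already use its fallback.
# Assuming the function above is in scope (it lives in style50/_api.py):
columns, lines = get_terminal_size()
print("{} columns x {} lines".format(columns, lines))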
cs50/style50
style50/_api.py
Style50.run_diff
def run_diff(self): """ Run checks on self.files, printing diff of styled/unstyled output to stdout. """ files = tuple(self.files) # Use same header as more. header, footer = (termcolor.colored("{0}\n{{}}\n{0}\n".format( ":" * 14), "cyan"), "\n") if len(files) > 1 else ("", "") for file in files: print(header.format(file), end="") try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue # Display results if results.diffs: print() print(*self.diff(results.original, results.styled), sep="\n") print() conjunction = "And" else: termcolor.cprint("Looks good!", "green") conjunction = "But" if results.diffs: for type, c in sorted(self._warn_chars): color, verb = ("on_green", "insert") if type == "+" else ("on_red", "delete") termcolor.cprint(c, None, color, end="") termcolor.cprint(" means that you should {} a {}.".format( verb, "newline" if c == "\\n" else "tab"), "yellow") if results.comment_ratio < results.COMMENT_MIN: termcolor.cprint("{} consider adding more comments!".format(conjunction), "yellow") if (results.comment_ratio < results.COMMENT_MIN or self._warn_chars) and results.diffs: print()
python
def run_diff(self): """ Run checks on self.files, printing diff of styled/unstyled output to stdout. """ files = tuple(self.files) # Use same header as more. header, footer = (termcolor.colored("{0}\n{{}}\n{0}\n".format( ":" * 14), "cyan"), "\n") if len(files) > 1 else ("", "") for file in files: print(header.format(file), end="") try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue # Display results if results.diffs: print() print(*self.diff(results.original, results.styled), sep="\n") print() conjunction = "And" else: termcolor.cprint("Looks good!", "green") conjunction = "But" if results.diffs: for type, c in sorted(self._warn_chars): color, verb = ("on_green", "insert") if type == "+" else ("on_red", "delete") termcolor.cprint(c, None, color, end="") termcolor.cprint(" means that you should {} a {}.".format( verb, "newline" if c == "\\n" else "tab"), "yellow") if results.comment_ratio < results.COMMENT_MIN: termcolor.cprint("{} consider adding more comments!".format(conjunction), "yellow") if (results.comment_ratio < results.COMMENT_MIN or self._warn_chars) and results.diffs: print()
[ "def", "run_diff", "(", "self", ")", ":", "files", "=", "tuple", "(", "self", ".", "files", ")", "# Use same header as more.", "header", ",", "footer", "=", "(", "termcolor", ".", "colored", "(", "\"{0}\\n{{}}\\n{0}\\n\"", ".", "format", "(", "\":\"", "*", "14", ")", ",", "\"cyan\"", ")", ",", "\"\\n\"", ")", "if", "len", "(", "files", ")", ">", "1", "else", "(", "\"\"", ",", "\"\"", ")", "for", "file", "in", "files", ":", "print", "(", "header", ".", "format", "(", "file", ")", ",", "end", "=", "\"\"", ")", "try", ":", "results", "=", "self", ".", "_check", "(", "file", ")", "except", "Error", "as", "e", ":", "termcolor", ".", "cprint", "(", "e", ".", "msg", ",", "\"yellow\"", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "# Display results", "if", "results", ".", "diffs", ":", "print", "(", ")", "print", "(", "*", "self", ".", "diff", "(", "results", ".", "original", ",", "results", ".", "styled", ")", ",", "sep", "=", "\"\\n\"", ")", "print", "(", ")", "conjunction", "=", "\"And\"", "else", ":", "termcolor", ".", "cprint", "(", "\"Looks good!\"", ",", "\"green\"", ")", "conjunction", "=", "\"But\"", "if", "results", ".", "diffs", ":", "for", "type", ",", "c", "in", "sorted", "(", "self", ".", "_warn_chars", ")", ":", "color", ",", "verb", "=", "(", "\"on_green\"", ",", "\"insert\"", ")", "if", "type", "==", "\"+\"", "else", "(", "\"on_red\"", ",", "\"delete\"", ")", "termcolor", ".", "cprint", "(", "c", ",", "None", ",", "color", ",", "end", "=", "\"\"", ")", "termcolor", ".", "cprint", "(", "\" means that you should {} a {}.\"", ".", "format", "(", "verb", ",", "\"newline\"", "if", "c", "==", "\"\\\\n\"", "else", "\"tab\"", ")", ",", "\"yellow\"", ")", "if", "results", ".", "comment_ratio", "<", "results", ".", "COMMENT_MIN", ":", "termcolor", ".", "cprint", "(", "\"{} consider adding more comments!\"", ".", "format", "(", "conjunction", ")", ",", "\"yellow\"", ")", "if", "(", "results", ".", "comment_ratio", "<", "results", ".", "COMMENT_MIN", "or", "self", ".", "_warn_chars", ")", "and", "results", ".", "diffs", ":", "print", "(", ")" ]
Run checks on self.files, printing diff of styled/unstyled output to stdout.
[ "Run", "checks", "on", "self", ".", "files", "printing", "diff", "of", "styled", "/", "unstyled", "output", "to", "stdout", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L96-L134
train
cs50/style50
style50/_api.py
Style50.run_json
def run_json(self): """ Run checks on self.files, printing json object containing information relavent to the CS50 IDE plugin at the end. """ checks = {} for file in self.files: try: results = self._check(file) except Error as e: checks[file] = { "error": e.msg } else: checks[file] = { "score": results.score, "comments": results.comment_ratio >= results.COMMENT_MIN, "diff": "<pre>{}</pre>".format("\n".join(self.html_diff(results.original, results.styled))), } json.dump(checks, sys.stdout, indent=4) print()
python
def run_json(self): """ Run checks on self.files, printing json object containing information relavent to the CS50 IDE plugin at the end. """ checks = {} for file in self.files: try: results = self._check(file) except Error as e: checks[file] = { "error": e.msg } else: checks[file] = { "score": results.score, "comments": results.comment_ratio >= results.COMMENT_MIN, "diff": "<pre>{}</pre>".format("\n".join(self.html_diff(results.original, results.styled))), } json.dump(checks, sys.stdout, indent=4) print()
[ "def", "run_json", "(", "self", ")", ":", "checks", "=", "{", "}", "for", "file", "in", "self", ".", "files", ":", "try", ":", "results", "=", "self", ".", "_check", "(", "file", ")", "except", "Error", "as", "e", ":", "checks", "[", "file", "]", "=", "{", "\"error\"", ":", "e", ".", "msg", "}", "else", ":", "checks", "[", "file", "]", "=", "{", "\"score\"", ":", "results", ".", "score", ",", "\"comments\"", ":", "results", ".", "comment_ratio", ">=", "results", ".", "COMMENT_MIN", ",", "\"diff\"", ":", "\"<pre>{}</pre>\"", ".", "format", "(", "\"\\n\"", ".", "join", "(", "self", ".", "html_diff", "(", "results", ".", "original", ",", "results", ".", "styled", ")", ")", ")", ",", "}", "json", ".", "dump", "(", "checks", ",", "sys", ".", "stdout", ",", "indent", "=", "4", ")", "print", "(", ")" ]
Run checks on self.files, printing json object containing information relevant to the CS50 IDE plugin at the end.
[ "Run", "checks", "on", "self", ".", "files", "printing", "json", "object", "containing", "information", "relevant", "to", "the", "CS50", "IDE", "plugin", "at", "the", "end", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L136-L157
train
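For reference, the object run_json prints maps each checked file either to an "error" entry or to the three keys built above; the file names and values in this example are invented:
{
    "hello.c": {
        "score": 0.95,
        "comments": true,
        "diff": "<pre>...</pre>"
    },
    "empty.xyz": {
        "error": "unknown file type \"empty.xyz\", skipping..."
    }
}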
cs50/style50
style50/_api.py
Style50.run_score
def run_score(self): """ Run checks on self.files, printing raw percentage to stdout. """ diffs = 0 lines = 0 for file in self.files: try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue diffs += results.diffs lines += results.lines try: print(max(1 - diffs / lines, 0.0)) except ZeroDivisionError: print(0.0)
python
def run_score(self): """ Run checks on self.files, printing raw percentage to stdout. """ diffs = 0 lines = 0 for file in self.files: try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue diffs += results.diffs lines += results.lines try: print(max(1 - diffs / lines, 0.0)) except ZeroDivisionError: print(0.0)
[ "def", "run_score", "(", "self", ")", ":", "diffs", "=", "0", "lines", "=", "0", "for", "file", "in", "self", ".", "files", ":", "try", ":", "results", "=", "self", ".", "_check", "(", "file", ")", "except", "Error", "as", "e", ":", "termcolor", ".", "cprint", "(", "e", ".", "msg", ",", "\"yellow\"", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "diffs", "+=", "results", ".", "diffs", "lines", "+=", "results", ".", "lines", "try", ":", "print", "(", "max", "(", "1", "-", "diffs", "/", "lines", ",", "0.0", ")", ")", "except", "ZeroDivisionError", ":", "print", "(", "0.0", ")" ]
Run checks on self.files, printing raw percentage to stdout.
[ "Run", "checks", "on", "self", ".", "files", "printing", "raw", "percentage", "to", "stdout", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L159-L179
train
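The aggregate printed by run_score is max(1 - diffs / lines, 0.0) over all files, with 0.0 on empty input. Toy numbers make the behavior concrete:
diffs, lines = 5, 50
print(max(1 - diffs / lines, 0.0))  # 0.9 -> five differing lines out of fifty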
cs50/style50
style50/_api.py
Style50._check
def _check(self, file): """ Run apropriate check based on `file`'s extension and return it, otherwise raise an Error """ if not os.path.exists(file): raise Error("file \"{}\" not found".format(file)) _, extension = os.path.splitext(file) try: check = self.extension_map[extension[1:]] except KeyError: magic_type = magic.from_file(file) for name, cls in self.magic_map.items(): if name in magic_type: check = cls break else: raise Error("unknown file type \"{}\", skipping...".format(file)) try: with open(file) as f: code = "\n".join(line.rstrip() for line in f) except UnicodeDecodeError: raise Error("file does not seem to contain text, skipping...") # Ensure we don't warn about adding trailing newline try: if code[-1] != '\n': code += '\n' except IndexError: pass return check(code)
python
def _check(self, file): """ Run apropriate check based on `file`'s extension and return it, otherwise raise an Error """ if not os.path.exists(file): raise Error("file \"{}\" not found".format(file)) _, extension = os.path.splitext(file) try: check = self.extension_map[extension[1:]] except KeyError: magic_type = magic.from_file(file) for name, cls in self.magic_map.items(): if name in magic_type: check = cls break else: raise Error("unknown file type \"{}\", skipping...".format(file)) try: with open(file) as f: code = "\n".join(line.rstrip() for line in f) except UnicodeDecodeError: raise Error("file does not seem to contain text, skipping...") # Ensure we don't warn about adding trailing newline try: if code[-1] != '\n': code += '\n' except IndexError: pass return check(code)
[ "def", "_check", "(", "self", ",", "file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "raise", "Error", "(", "\"file \\\"{}\\\" not found\"", ".", "format", "(", "file", ")", ")", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "try", ":", "check", "=", "self", ".", "extension_map", "[", "extension", "[", "1", ":", "]", "]", "except", "KeyError", ":", "magic_type", "=", "magic", ".", "from_file", "(", "file", ")", "for", "name", ",", "cls", "in", "self", ".", "magic_map", ".", "items", "(", ")", ":", "if", "name", "in", "magic_type", ":", "check", "=", "cls", "break", "else", ":", "raise", "Error", "(", "\"unknown file type \\\"{}\\\", skipping...\"", ".", "format", "(", "file", ")", ")", "try", ":", "with", "open", "(", "file", ")", "as", "f", ":", "code", "=", "\"\\n\"", ".", "join", "(", "line", ".", "rstrip", "(", ")", "for", "line", "in", "f", ")", "except", "UnicodeDecodeError", ":", "raise", "Error", "(", "\"file does not seem to contain text, skipping...\"", ")", "# Ensure we don't warn about adding trailing newline", "try", ":", "if", "code", "[", "-", "1", "]", "!=", "'\\n'", ":", "code", "+=", "'\\n'", "except", "IndexError", ":", "pass", "return", "check", "(", "code", ")" ]
Run appropriate check based on `file`'s extension and return it, otherwise raise an Error
[ "Run", "appropriate", "check", "based", "on", "file", "s", "extension", "and", "return", "it", "otherwise", "raise", "an", "Error" ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L181-L215
train
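A stripped-down sketch of the same two-stage lookup (extension first, then a substring match against a file-type description). The maps and the describe() helper are placeholders, not style50's real registries:
import os

extension_map = {"py": "python check"}
magic_map = {"Python script": "python check"}

def pick_check(file, describe):
    _, ext = os.path.splitext(file)
    try:
        return extension_map[ext[1:]]
    except KeyError:
        # Fall back to matching the file-type description, as _check does
        # with magic.from_file.
        for name, check in magic_map.items():
            if name in describe(file):
                return check
    raise ValueError("unknown file type")

print(pick_check("foo.py", lambda f: ""))  # 'python check' via the extension map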
cs50/style50
style50/_api.py
Style50.split_diff
def split_diff(old, new): """ Returns a generator yielding the side-by-side diff of `old` and `new`. """ return map(lambda l: l.rstrip(), icdiff.ConsoleDiff(cols=COLUMNS).make_table(old.splitlines(), new.splitlines()))
python
def split_diff(old, new): """ Returns a generator yielding the side-by-side diff of `old` and `new`. """ return map(lambda l: l.rstrip(), icdiff.ConsoleDiff(cols=COLUMNS).make_table(old.splitlines(), new.splitlines()))
[ "def", "split_diff", "(", "old", ",", "new", ")", ":", "return", "map", "(", "lambda", "l", ":", "l", ".", "rstrip", "(", ")", ",", "icdiff", ".", "ConsoleDiff", "(", "cols", "=", "COLUMNS", ")", ".", "make_table", "(", "old", ".", "splitlines", "(", ")", ",", "new", ".", "splitlines", "(", ")", ")", ")" ]
Returns a generator yielding the side-by-side diff of `old` and `new`.
[ "Returns", "a", "generator", "yielding", "the", "side", "-", "by", "-", "side", "diff", "of", "old", "and", "new", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L218-L223
train
cs50/style50
style50/_api.py
Style50.unified
def unified(old, new): """ Returns a generator yielding a unified diff between `old` and `new`. """ for diff in difflib.ndiff(old.splitlines(), new.splitlines()): if diff[0] == " ": yield diff elif diff[0] == "?": continue else: yield termcolor.colored(diff, "red" if diff[0] == "-" else "green", attrs=["bold"])
python
def unified(old, new): """ Returns a generator yielding a unified diff between `old` and `new`. """ for diff in difflib.ndiff(old.splitlines(), new.splitlines()): if diff[0] == " ": yield diff elif diff[0] == "?": continue else: yield termcolor.colored(diff, "red" if diff[0] == "-" else "green", attrs=["bold"])
[ "def", "unified", "(", "old", ",", "new", ")", ":", "for", "diff", "in", "difflib", ".", "ndiff", "(", "old", ".", "splitlines", "(", ")", ",", "new", ".", "splitlines", "(", ")", ")", ":", "if", "diff", "[", "0", "]", "==", "\" \"", ":", "yield", "diff", "elif", "diff", "[", "0", "]", "==", "\"?\"", ":", "continue", "else", ":", "yield", "termcolor", ".", "colored", "(", "diff", ",", "\"red\"", "if", "diff", "[", "0", "]", "==", "\"-\"", "else", "\"green\"", ",", "attrs", "=", "[", "\"bold\"", "]", ")" ]
Returns a generator yielding a unified diff between `old` and `new`.
[ "Returns", "a", "generator", "yielding", "a", "unified", "diff", "between", "old", "and", "new", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L226-L236
train
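The generator above is a thin filter over difflib.ndiff: '?' hint lines are dropped and '+'/'-' lines are colored. A plain-Python demonstration of the underlying stream, with made-up input:
import difflib

old = "int main(void)\n{\nreturn 0;\n}"
new = "int main(void)\n{\n    return 0;\n}"
for line in difflib.ndiff(old.splitlines(), new.splitlines()):
    if line[0] != "?":  # same filtering as Style50.unified
        print(line)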
cs50/style50
style50/_api.py
Style50.html_diff
def html_diff(self, old, new): """ Return HTML formatted character-based diff between old and new (used for CS50 IDE). """ def html_transition(old_type, new_type): tags = [] for tag in [("/", old_type), ("", new_type)]: if tag[1] not in ["+", "-"]: continue tags.append("<{}{}>".format(tag[0], "ins" if tag[1] == "+" else "del")) return "".join(tags) return self._char_diff(old, new, html_transition, fmt=cgi.escape)
python
def html_diff(self, old, new): """ Return HTML formatted character-based diff between old and new (used for CS50 IDE). """ def html_transition(old_type, new_type): tags = [] for tag in [("/", old_type), ("", new_type)]: if tag[1] not in ["+", "-"]: continue tags.append("<{}{}>".format(tag[0], "ins" if tag[1] == "+" else "del")) return "".join(tags) return self._char_diff(old, new, html_transition, fmt=cgi.escape)
[ "def", "html_diff", "(", "self", ",", "old", ",", "new", ")", ":", "def", "html_transition", "(", "old_type", ",", "new_type", ")", ":", "tags", "=", "[", "]", "for", "tag", "in", "[", "(", "\"/\"", ",", "old_type", ")", ",", "(", "\"\"", ",", "new_type", ")", "]", ":", "if", "tag", "[", "1", "]", "not", "in", "[", "\"+\"", ",", "\"-\"", "]", ":", "continue", "tags", ".", "append", "(", "\"<{}{}>\"", ".", "format", "(", "tag", "[", "0", "]", ",", "\"ins\"", "if", "tag", "[", "1", "]", "==", "\"+\"", "else", "\"del\"", ")", ")", "return", "\"\"", ".", "join", "(", "tags", ")", "return", "self", ".", "_char_diff", "(", "old", ",", "new", ",", "html_transition", ",", "fmt", "=", "cgi", ".", "escape", ")" ]
Return HTML formatted character-based diff between old and new (used for CS50 IDE).
[ "Return", "HTML", "formatted", "character", "-", "based", "diff", "between", "old", "and", "new", "(", "used", "for", "CS50", "IDE", ")", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L238-L250
train
cs50/style50
style50/_api.py
Style50.char_diff
def char_diff(self, old, new): """ Return color-coded character-based diff between `old` and `new`. """ def color_transition(old_type, new_type): new_color = termcolor.colored("", None, "on_red" if new_type == "-" else "on_green" if new_type == "+" else None) return "{}{}".format(termcolor.RESET, new_color[:-len(termcolor.RESET)]) return self._char_diff(old, new, color_transition)
python
def char_diff(self, old, new): """ Return color-coded character-based diff between `old` and `new`. """ def color_transition(old_type, new_type): new_color = termcolor.colored("", None, "on_red" if new_type == "-" else "on_green" if new_type == "+" else None) return "{}{}".format(termcolor.RESET, new_color[:-len(termcolor.RESET)]) return self._char_diff(old, new, color_transition)
[ "def", "char_diff", "(", "self", ",", "old", ",", "new", ")", ":", "def", "color_transition", "(", "old_type", ",", "new_type", ")", ":", "new_color", "=", "termcolor", ".", "colored", "(", "\"\"", ",", "None", ",", "\"on_red\"", "if", "new_type", "==", "\"-\"", "else", "\"on_green\"", "if", "new_type", "==", "\"+\"", "else", "None", ")", "return", "\"{}{}\"", ".", "format", "(", "termcolor", ".", "RESET", ",", "new_color", "[", ":", "-", "len", "(", "termcolor", ".", "RESET", ")", "]", ")", "return", "self", ".", "_char_diff", "(", "old", ",", "new", ",", "color_transition", ")" ]
Return color-coded character-based diff between `old` and `new`.
[ "Return", "color", "-", "coded", "character", "-", "based", "diff", "between", "old", "and", "new", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L252-L261
train
cs50/style50
style50/_api.py
Style50._char_diff
def _char_diff(self, old, new, transition, fmt=lambda c: c): """ Returns a char-based diff between `old` and `new` where each character is formatted by `fmt` and transitions between blocks are determined by `transition`. """ differ = difflib.ndiff(old, new) # Type of difference. dtype = None # Buffer for current line. line = [] while True: # Get next diff or None if we're at the end. d = next(differ, (None,)) if d[0] != dtype: line += transition(dtype, d[0]) dtype = d[0] if dtype is None: break if d[2] == "\n": if dtype != " ": self._warn_chars.add((dtype, "\\n")) # Show added/removed newlines. line += [fmt(r"\n"), transition(dtype, " ")] # Don't yield a line if we are removing a newline if dtype != "-": yield "".join(line) line.clear() line.append(transition(" ", dtype)) elif dtype != " " and d[2] == "\t": # Show added/removed tabs. line.append(fmt("\\t")) self._warn_chars.add((dtype, "\\t")) else: line.append(fmt(d[2])) # Flush buffer before quitting. last = "".join(line) # Only print last line if it contains non-ANSI characters. if re.sub(r"\x1b[^m]*m", "", last): yield last
python
def _char_diff(self, old, new, transition, fmt=lambda c: c): """ Returns a char-based diff between `old` and `new` where each character is formatted by `fmt` and transitions between blocks are determined by `transition`. """ differ = difflib.ndiff(old, new) # Type of difference. dtype = None # Buffer for current line. line = [] while True: # Get next diff or None if we're at the end. d = next(differ, (None,)) if d[0] != dtype: line += transition(dtype, d[0]) dtype = d[0] if dtype is None: break if d[2] == "\n": if dtype != " ": self._warn_chars.add((dtype, "\\n")) # Show added/removed newlines. line += [fmt(r"\n"), transition(dtype, " ")] # Don't yield a line if we are removing a newline if dtype != "-": yield "".join(line) line.clear() line.append(transition(" ", dtype)) elif dtype != " " and d[2] == "\t": # Show added/removed tabs. line.append(fmt("\\t")) self._warn_chars.add((dtype, "\\t")) else: line.append(fmt(d[2])) # Flush buffer before quitting. last = "".join(line) # Only print last line if it contains non-ANSI characters. if re.sub(r"\x1b[^m]*m", "", last): yield last
[ "def", "_char_diff", "(", "self", ",", "old", ",", "new", ",", "transition", ",", "fmt", "=", "lambda", "c", ":", "c", ")", ":", "differ", "=", "difflib", ".", "ndiff", "(", "old", ",", "new", ")", "# Type of difference.", "dtype", "=", "None", "# Buffer for current line.", "line", "=", "[", "]", "while", "True", ":", "# Get next diff or None if we're at the end.", "d", "=", "next", "(", "differ", ",", "(", "None", ",", ")", ")", "if", "d", "[", "0", "]", "!=", "dtype", ":", "line", "+=", "transition", "(", "dtype", ",", "d", "[", "0", "]", ")", "dtype", "=", "d", "[", "0", "]", "if", "dtype", "is", "None", ":", "break", "if", "d", "[", "2", "]", "==", "\"\\n\"", ":", "if", "dtype", "!=", "\" \"", ":", "self", ".", "_warn_chars", ".", "add", "(", "(", "dtype", ",", "\"\\\\n\"", ")", ")", "# Show added/removed newlines.", "line", "+=", "[", "fmt", "(", "r\"\\n\"", ")", ",", "transition", "(", "dtype", ",", "\" \"", ")", "]", "# Don't yield a line if we are removing a newline", "if", "dtype", "!=", "\"-\"", ":", "yield", "\"\"", ".", "join", "(", "line", ")", "line", ".", "clear", "(", ")", "line", ".", "append", "(", "transition", "(", "\" \"", ",", "dtype", ")", ")", "elif", "dtype", "!=", "\" \"", "and", "d", "[", "2", "]", "==", "\"\\t\"", ":", "# Show added/removed tabs.", "line", ".", "append", "(", "fmt", "(", "\"\\\\t\"", ")", ")", "self", ".", "_warn_chars", ".", "add", "(", "(", "dtype", ",", "\"\\\\t\"", ")", ")", "else", ":", "line", ".", "append", "(", "fmt", "(", "d", "[", "2", "]", ")", ")", "# Flush buffer before quitting.", "last", "=", "\"\"", ".", "join", "(", "line", ")", "# Only print last line if it contains non-ANSI characters.", "if", "re", ".", "sub", "(", "r\"\\x1b[^m]*m\"", ",", "\"\"", ",", "last", ")", ":", "yield", "last" ]
Returns a char-based diff between `old` and `new` where each character is formatted by `fmt` and transitions between blocks are determined by `transition`.
[ "Returns", "a", "char", "-", "based", "diff", "between", "old", "and", "new", "where", "each", "character", "is", "formatted", "by", "fmt", "and", "transitions", "between", "blocks", "are", "determined", "by", "transition", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L263-L309
train
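At the character level, difflib.ndiff yields one two-character-prefixed entry per character; this is exactly the stream _char_diff walks while tracking transitions between ' ', '+' and '-'. A tiny standalone demonstration:
import difflib

print(list(difflib.ndiff("cat", "cart")))
# ['  c', '  a', '+ r', '  t'] -- d[0] is the diff type, d[2] the character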
cs50/style50
style50/_api.py
StyleCheck.count_lines
def count_lines(self, code): """ Count lines of code (by default ignores empty lines, but child could override to do more). """ return sum(bool(line.strip()) for line in code.splitlines())
python
def count_lines(self, code): """ Count lines of code (by default ignores empty lines, but child could override to do more). """ return sum(bool(line.strip()) for line in code.splitlines())
[ "def", "count_lines", "(", "self", ",", "code", ")", ":", "return", "sum", "(", "bool", "(", "line", ".", "strip", "(", ")", ")", "for", "line", "in", "code", ".", "splitlines", "(", ")", ")" ]
Count lines of code (by default ignores empty lines, but child could override to do more).
[ "Count", "lines", "of", "code", "(", "by", "default", "ignores", "empty", "lines", "but", "child", "could", "override", "to", "do", "more", ")", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L366-L370
train
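A quick check of the default behavior: blank lines contribute False to the sum, so only non-empty lines are counted.
code = "int x;\n\nint y;\n"
print(sum(bool(line.strip()) for line in code.splitlines()))  # 2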
cs50/style50
style50/_api.py
StyleCheck.run
def run(command, input=None, exit=0, shell=False): """ Run `command` passing it stdin from `input`, throwing a DependencyError if command is not found. Throws Error if exit code of command is not `exit` (unless `exit` is None). """ if isinstance(input, str): input = input.encode() # Only pipe stdin if we have input to pipe. stdin = {} if input is None else {"stdin": subprocess.PIPE} try: child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **stdin) except FileNotFoundError as e: # Extract name of command. name = command.split(' ', 1)[0] if isinstance(command, str) else command[0] raise DependencyError(name) stdout, _ = child.communicate(input=input) if exit is not None and child.returncode != exit: raise Error("failed to stylecheck code") return stdout.decode()
python
def run(command, input=None, exit=0, shell=False): """ Run `command` passing it stdin from `input`, throwing a DependencyError if command is not found. Throws Error if exit code of command is not `exit` (unless `exit` is None). """ if isinstance(input, str): input = input.encode() # Only pipe stdin if we have input to pipe. stdin = {} if input is None else {"stdin": subprocess.PIPE} try: child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **stdin) except FileNotFoundError as e: # Extract name of command. name = command.split(' ', 1)[0] if isinstance(command, str) else command[0] raise DependencyError(name) stdout, _ = child.communicate(input=input) if exit is not None and child.returncode != exit: raise Error("failed to stylecheck code") return stdout.decode()
[ "def", "run", "(", "command", ",", "input", "=", "None", ",", "exit", "=", "0", ",", "shell", "=", "False", ")", ":", "if", "isinstance", "(", "input", ",", "str", ")", ":", "input", "=", "input", ".", "encode", "(", ")", "# Only pipe stdin if we have input to pipe.", "stdin", "=", "{", "}", "if", "input", "is", "None", "else", "{", "\"stdin\"", ":", "subprocess", ".", "PIPE", "}", "try", ":", "child", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "*", "*", "stdin", ")", "except", "FileNotFoundError", "as", "e", ":", "# Extract name of command.", "name", "=", "command", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", "if", "isinstance", "(", "command", ",", "str", ")", "else", "command", "[", "0", "]", "raise", "DependencyError", "(", "name", ")", "stdout", ",", "_", "=", "child", ".", "communicate", "(", "input", "=", "input", ")", "if", "exit", "is", "not", "None", "and", "child", ".", "returncode", "!=", "exit", ":", "raise", "Error", "(", "\"failed to stylecheck code\"", ")", "return", "stdout", ".", "decode", "(", ")" ]
Run `command` passing it stdin from `input`, throwing a DependencyError if command is not found. Throws Error if exit code of command is not `exit` (unless `exit` is None).
[ "Run", "command", "passing", "it", "stdin", "from", "input", "throwing", "a", "DependencyError", "if", "command", "is", "not", "found", ".", "Throws", "Error", "if", "exit", "code", "of", "command", "is", "not", "exit", "(", "unless", "exit", "is", "None", ")", "." ]
2dfe5957f7b727ee5163499e7b8191275aee914c
https://github.com/cs50/style50/blob/2dfe5957f7b727ee5163499e7b8191275aee914c/style50/_api.py#L373-L394
train
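Usage sketch, assuming the self-less signature above means run is exposed as a static method on StyleCheck; `cat` stands in for an external style tool and simply echoes stdin, while a missing binary would surface as DependencyError rather than a bare FileNotFoundError.
out = StyleCheck.run(["cat"], input="hello\n")
print(out)  # 'hello\n'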
Julius2342/pyvlx
pyvlx/frame_creation.py
frame_from_raw
def frame_from_raw(raw): """Create and return frame from raw bytes.""" command, payload = extract_from_frame(raw) frame = create_frame(command) if frame is None: PYVLXLOG.warning("Command %s not implemented, raw: %s", command, ":".join("{:02x}".format(c) for c in raw)) return None frame.validate_payload_len(payload) frame.from_payload(payload) return frame
python
def frame_from_raw(raw): """Create and return frame from raw bytes.""" command, payload = extract_from_frame(raw) frame = create_frame(command) if frame is None: PYVLXLOG.warning("Command %s not implemented, raw: %s", command, ":".join("{:02x}".format(c) for c in raw)) return None frame.validate_payload_len(payload) frame.from_payload(payload) return frame
[ "def", "frame_from_raw", "(", "raw", ")", ":", "command", ",", "payload", "=", "extract_from_frame", "(", "raw", ")", "frame", "=", "create_frame", "(", "command", ")", "if", "frame", "is", "None", ":", "PYVLXLOG", ".", "warning", "(", "\"Command %s not implemented, raw: %s\"", ",", "command", ",", "\":\"", ".", "join", "(", "\"{:02x}\"", ".", "format", "(", "c", ")", "for", "c", "in", "raw", ")", ")", "return", "None", "frame", ".", "validate_payload_len", "(", "payload", ")", "frame", ".", "from_payload", "(", "payload", ")", "return", "frame" ]
Create and return frame from raw bytes.
[ "Create", "and", "return", "frame", "from", "raw", "bytes", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frame_creation.py#L32-L41
train
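The warning above renders raw bytes as colon-separated hex; the idiom is plain Python and easy to try in isolation:
raw = bytes([0x00, 0x10, 0xff])
print(":".join("{:02x}".format(c) for c in raw))  # 00:10:ff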
Julius2342/pyvlx
pyvlx/frame_creation.py
create_frame
def create_frame(command): """Create and return empty Frame from Command.""" # pylint: disable=too-many-branches,too-many-return-statements if command == Command.GW_ERROR_NTF: return FrameErrorNotification() if command == Command.GW_COMMAND_SEND_REQ: return FrameCommandSendRequest() if command == Command.GW_COMMAND_SEND_CFM: return FrameCommandSendConfirmation() if command == Command.GW_COMMAND_RUN_STATUS_NTF: return FrameCommandRunStatusNotification() if command == Command.GW_COMMAND_REMAINING_TIME_NTF: return FrameCommandRemainingTimeNotification() if command == Command.GW_SESSION_FINISHED_NTF: return FrameSessionFinishedNotification() if command == Command.GW_PASSWORD_ENTER_REQ: return FramePasswordEnterRequest() if command == Command.GW_PASSWORD_ENTER_CFM: return FramePasswordEnterConfirmation() if command == Command.GW_CS_DISCOVER_NODES_REQ: return FrameDiscoverNodesRequest() if command == Command.GW_CS_DISCOVER_NODES_CFM: return FrameDiscoverNodesConfirmation() if command == Command.GW_CS_DISCOVER_NODES_NTF: return FrameDiscoverNodesNotification() if command == Command.GW_GET_SCENE_LIST_REQ: return FrameGetSceneListRequest() if command == Command.GW_GET_SCENE_LIST_CFM: return FrameGetSceneListConfirmation() if command == Command.GW_GET_SCENE_LIST_NTF: return FrameGetSceneListNotification() if command == Command.GW_GET_NODE_INFORMATION_REQ: return FrameGetNodeInformationRequest() if command == Command.GW_GET_NODE_INFORMATION_CFM: return FrameGetNodeInformationConfirmation() if command == Command.GW_GET_NODE_INFORMATION_NTF: return FrameGetNodeInformationNotification() if command == Command.GW_GET_ALL_NODES_INFORMATION_REQ: return FrameGetAllNodesInformationRequest() if command == Command.GW_GET_ALL_NODES_INFORMATION_CFM: return FrameGetAllNodesInformationConfirmation() if command == Command.GW_GET_ALL_NODES_INFORMATION_NTF: return FrameGetAllNodesInformationNotification() if command == Command.GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF: return FrameGetAllNodesInformationFinishedNotification() if command == Command.GW_ACTIVATE_SCENE_REQ: return FrameActivateSceneRequest() if command == Command.GW_ACTIVATE_SCENE_CFM: return FrameActivateSceneConfirmation() if command == Command.GW_GET_VERSION_REQ: return FrameGetVersionRequest() if command == Command.GW_GET_VERSION_CFM: return FrameGetVersionConfirmation() if command == Command.GW_GET_PROTOCOL_VERSION_REQ: return FrameGetProtocolVersionRequest() if command == Command.GW_GET_PROTOCOL_VERSION_CFM: return FrameGetProtocolVersionConfirmation() if command == Command.GW_SET_NODE_NAME_REQ: return FrameSetNodeNameRequest() if command == Command.GW_SET_NODE_NAME_CFM: return FrameSetNodeNameConfirmation() if command == Command.GW_NODE_INFORMATION_CHANGED_NTF: return FrameNodeInformationChangedNotification() if command == Command.GW_GET_STATE_REQ: return FrameGetStateRequest() if command == Command.GW_GET_STATE_CFM: return FrameGetStateConfirmation() if command == Command.GW_SET_UTC_REQ: return FrameSetUTCRequest() if command == Command.GW_SET_UTC_CFM: return FrameSetUTCConfirmation() if command == Command.GW_ACTIVATION_LOG_UPDATED_NTF: return FrameActivationLogUpdatedNotification() if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_REQ: return FrameHouseStatusMonitorEnableRequest() if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_CFM: return FrameHouseStatusMonitorEnableConfirmation() if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_REQ: return FrameHouseStatusMonitorDisableRequest() if command == 
Command.GW_HOUSE_STATUS_MONITOR_DISABLE_CFM: return FrameHouseStatusMonitorDisableConfirmation() if command == Command.GW_NODE_STATE_POSITION_CHANGED_NTF: return FrameNodeStatePositionChangedNotification() return None
python
def create_frame(command): """Create and return empty Frame from Command.""" # pylint: disable=too-many-branches,too-many-return-statements if command == Command.GW_ERROR_NTF: return FrameErrorNotification() if command == Command.GW_COMMAND_SEND_REQ: return FrameCommandSendRequest() if command == Command.GW_COMMAND_SEND_CFM: return FrameCommandSendConfirmation() if command == Command.GW_COMMAND_RUN_STATUS_NTF: return FrameCommandRunStatusNotification() if command == Command.GW_COMMAND_REMAINING_TIME_NTF: return FrameCommandRemainingTimeNotification() if command == Command.GW_SESSION_FINISHED_NTF: return FrameSessionFinishedNotification() if command == Command.GW_PASSWORD_ENTER_REQ: return FramePasswordEnterRequest() if command == Command.GW_PASSWORD_ENTER_CFM: return FramePasswordEnterConfirmation() if command == Command.GW_CS_DISCOVER_NODES_REQ: return FrameDiscoverNodesRequest() if command == Command.GW_CS_DISCOVER_NODES_CFM: return FrameDiscoverNodesConfirmation() if command == Command.GW_CS_DISCOVER_NODES_NTF: return FrameDiscoverNodesNotification() if command == Command.GW_GET_SCENE_LIST_REQ: return FrameGetSceneListRequest() if command == Command.GW_GET_SCENE_LIST_CFM: return FrameGetSceneListConfirmation() if command == Command.GW_GET_SCENE_LIST_NTF: return FrameGetSceneListNotification() if command == Command.GW_GET_NODE_INFORMATION_REQ: return FrameGetNodeInformationRequest() if command == Command.GW_GET_NODE_INFORMATION_CFM: return FrameGetNodeInformationConfirmation() if command == Command.GW_GET_NODE_INFORMATION_NTF: return FrameGetNodeInformationNotification() if command == Command.GW_GET_ALL_NODES_INFORMATION_REQ: return FrameGetAllNodesInformationRequest() if command == Command.GW_GET_ALL_NODES_INFORMATION_CFM: return FrameGetAllNodesInformationConfirmation() if command == Command.GW_GET_ALL_NODES_INFORMATION_NTF: return FrameGetAllNodesInformationNotification() if command == Command.GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF: return FrameGetAllNodesInformationFinishedNotification() if command == Command.GW_ACTIVATE_SCENE_REQ: return FrameActivateSceneRequest() if command == Command.GW_ACTIVATE_SCENE_CFM: return FrameActivateSceneConfirmation() if command == Command.GW_GET_VERSION_REQ: return FrameGetVersionRequest() if command == Command.GW_GET_VERSION_CFM: return FrameGetVersionConfirmation() if command == Command.GW_GET_PROTOCOL_VERSION_REQ: return FrameGetProtocolVersionRequest() if command == Command.GW_GET_PROTOCOL_VERSION_CFM: return FrameGetProtocolVersionConfirmation() if command == Command.GW_SET_NODE_NAME_REQ: return FrameSetNodeNameRequest() if command == Command.GW_SET_NODE_NAME_CFM: return FrameSetNodeNameConfirmation() if command == Command.GW_NODE_INFORMATION_CHANGED_NTF: return FrameNodeInformationChangedNotification() if command == Command.GW_GET_STATE_REQ: return FrameGetStateRequest() if command == Command.GW_GET_STATE_CFM: return FrameGetStateConfirmation() if command == Command.GW_SET_UTC_REQ: return FrameSetUTCRequest() if command == Command.GW_SET_UTC_CFM: return FrameSetUTCConfirmation() if command == Command.GW_ACTIVATION_LOG_UPDATED_NTF: return FrameActivationLogUpdatedNotification() if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_REQ: return FrameHouseStatusMonitorEnableRequest() if command == Command.GW_HOUSE_STATUS_MONITOR_ENABLE_CFM: return FrameHouseStatusMonitorEnableConfirmation() if command == Command.GW_HOUSE_STATUS_MONITOR_DISABLE_REQ: return FrameHouseStatusMonitorDisableRequest() if command == 
Command.GW_HOUSE_STATUS_MONITOR_DISABLE_CFM: return FrameHouseStatusMonitorDisableConfirmation() if command == Command.GW_NODE_STATE_POSITION_CHANGED_NTF: return FrameNodeStatePositionChangedNotification() return None
[ "def", "create_frame", "(", "command", ")", ":", "# pylint: disable=too-many-branches,too-many-return-statements", "if", "command", "==", "Command", ".", "GW_ERROR_NTF", ":", "return", "FrameErrorNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_COMMAND_SEND_REQ", ":", "return", "FrameCommandSendRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_COMMAND_SEND_CFM", ":", "return", "FrameCommandSendConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_COMMAND_RUN_STATUS_NTF", ":", "return", "FrameCommandRunStatusNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_COMMAND_REMAINING_TIME_NTF", ":", "return", "FrameCommandRemainingTimeNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_SESSION_FINISHED_NTF", ":", "return", "FrameSessionFinishedNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_PASSWORD_ENTER_REQ", ":", "return", "FramePasswordEnterRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_PASSWORD_ENTER_CFM", ":", "return", "FramePasswordEnterConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_CS_DISCOVER_NODES_REQ", ":", "return", "FrameDiscoverNodesRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_CS_DISCOVER_NODES_CFM", ":", "return", "FrameDiscoverNodesConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_CS_DISCOVER_NODES_NTF", ":", "return", "FrameDiscoverNodesNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_SCENE_LIST_REQ", ":", "return", "FrameGetSceneListRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_SCENE_LIST_CFM", ":", "return", "FrameGetSceneListConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_SCENE_LIST_NTF", ":", "return", "FrameGetSceneListNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_NODE_INFORMATION_REQ", ":", "return", "FrameGetNodeInformationRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_NODE_INFORMATION_CFM", ":", "return", "FrameGetNodeInformationConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_NODE_INFORMATION_NTF", ":", "return", "FrameGetNodeInformationNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_ALL_NODES_INFORMATION_REQ", ":", "return", "FrameGetAllNodesInformationRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_ALL_NODES_INFORMATION_CFM", ":", "return", "FrameGetAllNodesInformationConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_ALL_NODES_INFORMATION_NTF", ":", "return", "FrameGetAllNodesInformationNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF", ":", "return", "FrameGetAllNodesInformationFinishedNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_ACTIVATE_SCENE_REQ", ":", "return", "FrameActivateSceneRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_ACTIVATE_SCENE_CFM", ":", "return", "FrameActivateSceneConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_VERSION_REQ", ":", "return", "FrameGetVersionRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_VERSION_CFM", ":", "return", "FrameGetVersionConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_PROTOCOL_VERSION_REQ", ":", "return", "FrameGetProtocolVersionRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_PROTOCOL_VERSION_CFM", ":", "return", "FrameGetProtocolVersionConfirmation", "(", ")", "if", 
"command", "==", "Command", ".", "GW_SET_NODE_NAME_REQ", ":", "return", "FrameSetNodeNameRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_SET_NODE_NAME_CFM", ":", "return", "FrameSetNodeNameConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_NODE_INFORMATION_CHANGED_NTF", ":", "return", "FrameNodeInformationChangedNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_STATE_REQ", ":", "return", "FrameGetStateRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_GET_STATE_CFM", ":", "return", "FrameGetStateConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_SET_UTC_REQ", ":", "return", "FrameSetUTCRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_SET_UTC_CFM", ":", "return", "FrameSetUTCConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_ACTIVATION_LOG_UPDATED_NTF", ":", "return", "FrameActivationLogUpdatedNotification", "(", ")", "if", "command", "==", "Command", ".", "GW_HOUSE_STATUS_MONITOR_ENABLE_REQ", ":", "return", "FrameHouseStatusMonitorEnableRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_HOUSE_STATUS_MONITOR_ENABLE_CFM", ":", "return", "FrameHouseStatusMonitorEnableConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_HOUSE_STATUS_MONITOR_DISABLE_REQ", ":", "return", "FrameHouseStatusMonitorDisableRequest", "(", ")", "if", "command", "==", "Command", ".", "GW_HOUSE_STATUS_MONITOR_DISABLE_CFM", ":", "return", "FrameHouseStatusMonitorDisableConfirmation", "(", ")", "if", "command", "==", "Command", ".", "GW_NODE_STATE_POSITION_CHANGED_NTF", ":", "return", "FrameNodeStatePositionChangedNotification", "(", ")", "return", "None" ]
Create and return empty Frame from Command.
[ "Create", "and", "return", "empty", "Frame", "from", "Command", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frame_creation.py#L44-L142
train
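For readers skimming the record above: create_frame is a plain dispatch table written as an if-chain, mapping each Command member to an empty frame instance and returning None for unknown commands. A minimal usage sketch follows; the create_frame import path is taken from the record's url field, while the Command module location is an assumption:

from pyvlx.frame_creation import create_frame  # path per the record's url field
from pyvlx.const import Command                # module location assumed

frame = create_frame(Command.GW_GET_STATE_REQ)
if frame is None:
    # Unknown or unsupported commands yield None rather than raising.
    raise ValueError("no frame class registered for this command")
print(type(frame).__name__)  # expected: FrameGetStateRequest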
Julius2342/pyvlx
pyvlx/login.py
Login.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FramePasswordEnterConfirmation): return False if frame.status == PasswordEnterConfirmationStatus.FAILED: PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2]) self.success = False if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL: self.success = True return True
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FramePasswordEnterConfirmation): return False if frame.status == PasswordEnterConfirmationStatus.FAILED: PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2]) self.success = False if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL: self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FramePasswordEnterConfirmation", ")", ":", "return", "False", "if", "frame", ".", "status", "==", "PasswordEnterConfirmationStatus", ".", "FAILED", ":", "PYVLXLOG", ".", "warning", "(", "'Failed to authenticate with password \"%s****\"'", ",", "self", ".", "password", "[", ":", "2", "]", ")", "self", ".", "success", "=", "False", "if", "frame", ".", "status", "==", "PasswordEnterConfirmationStatus", ".", "SUCCESSFUL", ":", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/login.py#L18-L27
train
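Login.handle_frame follows the handler convention visible in this record: return False for frames it does not recognize, record the outcome in self.success, and return True once the expected confirmation arrives. A rough driver sketch under that convention — the wait_for_login helper and the incoming_frames async iterator are invented for illustration, not part of pyvlx:

async def wait_for_login(login, incoming_frames):
    # Feed frames to the handler until it claims one as the expected frame.
    async for frame in incoming_frames:
        if await login.handle_frame(frame):
            return login.success
    return False  # stream ended before a confirmation arrived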
Julius2342/pyvlx
pyvlx/frames/frame_get_protocol_version.py
FrameGetProtocolVersionConfirmation.get_payload
def get_payload(self): """Return Payload.""" return bytes( [self.major_version >> 8 & 255, self.major_version & 255, self.minor_version >> 8 & 255, self.minor_version & 255])
python
def get_payload(self): """Return Payload.""" return bytes( [self.major_version >> 8 & 255, self.major_version & 255, self.minor_version >> 8 & 255, self.minor_version & 255])
[ "def", "get_payload", "(", "self", ")", ":", "return", "bytes", "(", "[", "self", ".", "major_version", ">>", "8", "&", "255", ",", "self", ".", "major_version", "&", "255", ",", "self", ".", "minor_version", ">>", "8", "&", "255", ",", "self", ".", "minor_version", "&", "255", "]", ")" ]
Return Payload.
[ "Return", "Payload", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_protocol_version.py#L33-L37
train
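The shift-and-mask expressions in get_payload serialize the two version numbers as big-endian 16-bit integers. A quick standalone equivalence check against the standard library (a sketch, not part of pyvlx):

import struct

major, minor = 3, 14
manual = bytes([major >> 8 & 255, major & 255,
                minor >> 8 & 255, minor & 255])
packed = struct.pack(">HH", major, minor)  # two big-endian unsigned 16-bit ints
assert manual == packed  # both yield b'\x00\x03\x00\x0e'

Note that >> binds tighter than &, so major_version >> 8 & 255 parses as (major_version >> 8) & 255 — the intended high byte.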
Julius2342/pyvlx
pyvlx/frames/frame_get_protocol_version.py
FrameGetProtocolVersionConfirmation.from_payload
def from_payload(self, payload): """Init frame from binary data.""" self.major_version = payload[0] * 256 + payload[1] self.minor_version = payload[2] * 256 + payload[3]
python
def from_payload(self, payload): """Init frame from binary data.""" self.major_version = payload[0] * 256 + payload[1] self.minor_version = payload[2] * 256 + payload[3]
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "major_version", "=", "payload", "[", "0", "]", "*", "256", "+", "payload", "[", "1", "]", "self", ".", "minor_version", "=", "payload", "[", "2", "]", "*", "256", "+", "payload", "[", "3", "]" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_protocol_version.py#L39-L42
train
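from_payload is the inverse of get_payload above, so the pair round-trips any version numbers that fit in 16 bits. A self-contained check mirroring the two methods as free functions (a standalone sketch, not the actual class):

def encode(major, minor):
    # Mirror of get_payload: two big-endian 16-bit integers.
    return bytes([major >> 8 & 255, major & 255,
                  minor >> 8 & 255, minor & 255])

def decode(payload):
    # Mirror of from_payload: rebuild (major, minor) from four bytes.
    return payload[0] * 256 + payload[1], payload[2] * 256 + payload[3]

assert decode(encode(1, 258)) == (1, 258)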
Julius2342/pyvlx
pyvlx/connection.py
TCPTransport.data_received
def data_received(self, data): """Handle data received.""" self.tokenizer.feed(data) while self.tokenizer.has_tokens(): raw = self.tokenizer.get_next_token() frame = frame_from_raw(raw) if frame is not None: self.frame_received_cb(frame)
python
def data_received(self, data): """Handle data received.""" self.tokenizer.feed(data) while self.tokenizer.has_tokens(): raw = self.tokenizer.get_next_token() frame = frame_from_raw(raw) if frame is not None: self.frame_received_cb(frame)
[ "def", "data_received", "(", "self", ",", "data", ")", ":", "self", ".", "tokenizer", ".", "feed", "(", "data", ")", "while", "self", ".", "tokenizer", ".", "has_tokens", "(", ")", ":", "raw", "=", "self", ".", "tokenizer", ".", "get_next_token", "(", ")", "frame", "=", "frame_from_raw", "(", "raw", ")", "if", "frame", "is", "not", "None", ":", "self", ".", "frame_received_cb", "(", "frame", ")" ]
Handle data received.
[ "Handle", "data", "received", "." ]
ee78e1324bcb1be5b8d1a9d05ab5496b72eae848
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/connection.py#L47-L54
train
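data_received defers framing to a tokenizer that buffers partial TCP reads and yields only complete frames, so a frame split across reads is handled transparently. A toy stand-in with the same feed / has_tokens / get_next_token interface — the class and its newline delimiter are invented for illustration; the actual framing used by pyvlx's tokenizer is not shown in this record:

class NewlineTokenizer:
    """Toy tokenizer: treats b'\n' as the frame delimiter."""

    def __init__(self):
        self.buffer = b""

    def feed(self, data):
        # Accumulate raw bytes; frames may arrive split across reads.
        self.buffer += data

    def has_tokens(self):
        return b"\n" in self.buffer

    def get_next_token(self):
        token, _, self.buffer = self.buffer.partition(b"\n")
        return token

tok = NewlineTokenizer()
tok.feed(b"frame-one\nfra")   # one complete frame, one partial
tok.feed(b"me-two\n")         # second read completes the partial frame
while tok.has_tokens():
    print(tok.get_next_token())  # b'frame-one', then b'frame-two'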