repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
rshipp/python-dshield
dshield.py
daily404summary
def daily404summary(date, return_format=None):
    """Return daily summary information of submitted 404 Error Page
    Information.

    :param date: string or datetime.date() (required)
    :param return_format: optional output format, passed through to _get
    """
    uri = 'daily404summary'
    if date:
        try:
            # datetime.date / datetime objects expose strftime
            day = date.strftime("%Y-%m-%d")
        except AttributeError:
            # already a pre-formatted string
            day = date
        uri = '/'.join([uri, day])
    return _get(uri, return_format)
python
def daily404summary(date, return_format=None): """Returns daily summary information of submitted 404 Error Page Information. :param date: string or datetime.date() (required) """ uri = 'daily404summary' if date: try: uri = '/'.join([uri, date.strftime("%Y-%m-%d")]) except AttributeError: uri = '/'.join([uri, date]) return _get(uri, return_format)
[ "def", "daily404summary", "(", "date", ",", "return_format", "=", "None", ")", ":", "uri", "=", "'daily404summary'", "if", "date", ":", "try", ":", "uri", "=", "'/'", ".", "join", "(", "[", "uri", ",", "date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")",...
Returns daily summary information of submitted 404 Error Page Information. :param date: string or datetime.date() (required)
[ "Returns", "daily", "summary", "information", "of", "submitted", "404", "Error", "Page", "Information", "." ]
1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0
https://github.com/rshipp/python-dshield/blob/1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0/dshield.py#L234-L246
train
50,400
rshipp/python-dshield
dshield.py
daily404detail
def daily404detail(date, limit=None, return_format=None):
    """Return detail information of submitted 404 Error Page Information.

    :param date: string or datetime.date() (required)
    :param limit: string or int, limit for number of returned items
    :param return_format: optional output format, passed through to _get
    """
    parts = ['daily404detail']
    if date:
        try:
            # datetime.date / datetime objects expose strftime
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    if limit:
        parts.append(str(limit))
    return _get('/'.join(parts), return_format)
python
def daily404detail(date, limit=None, return_format=None): """Returns detail information of submitted 404 Error Page Information. :param date: string or datetime.date() (required) :param limit: string or int, limit for number of returned items """ uri = 'daily404detail' if date: try: uri = '/'.join([uri, date.strftime("%Y-%m-%d")]) except AttributeError: uri = '/'.join([uri, date]) if limit: uri = '/'.join([uri, str(limit)]) return _get(uri, return_format)
[ "def", "daily404detail", "(", "date", ",", "limit", "=", "None", ",", "return_format", "=", "None", ")", ":", "uri", "=", "'daily404detail'", "if", "date", ":", "try", ":", "uri", "=", "'/'", ".", "join", "(", "[", "uri", ",", "date", ".", "strftime"...
Returns detail information of submitted 404 Error Page Information. :param date: string or datetime.date() (required) :param limit: string or int, limit for number of returned items
[ "Returns", "detail", "information", "of", "submitted", "404", "Error", "Page", "Information", "." ]
1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0
https://github.com/rshipp/python-dshield/blob/1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0/dshield.py#L248-L262
train
50,401
rshipp/python-dshield
dshield.py
glossary
def glossary(term=None, return_format=None):
    """List glossary terms and definitions.

    :param term: a whole or partial word to "search" in the API
    :param return_format: optional output format, passed through to _get
    """
    uri = '/'.join(['glossary', term]) if term else 'glossary'
    return _get(uri, return_format)
python
def glossary(term=None, return_format=None): """List of glossary terms and definitions. :param term: a whole or parital word to "search" in the API """ uri = 'glossary' if term: uri = '/'.join([uri, term]) return _get(uri, return_format)
[ "def", "glossary", "(", "term", "=", "None", ",", "return_format", "=", "None", ")", ":", "uri", "=", "'glossary'", "if", "term", ":", "uri", "=", "'/'", ".", "join", "(", "[", "uri", ",", "term", "]", ")", "return", "_get", "(", "uri", ",", "ret...
List of glossary terms and definitions. :param term: a whole or parital word to "search" in the API
[ "List", "of", "glossary", "terms", "and", "definitions", "." ]
1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0
https://github.com/rshipp/python-dshield/blob/1b003d0dfac0bc2ee8b86ca5f1a44b765b8cc6e0/dshield.py#L264-L272
train
50,402
albertyw/syspath
syspath/syspath.py
_append_path
def _append_path(new_path): # type: (str) -> None """ Given a path string, append it to sys.path """ for path in sys.path: path = os.path.abspath(path) if new_path == path: return sys.path.append(new_path)
python
def _append_path(new_path): # type: (str) -> None """ Given a path string, append it to sys.path """ for path in sys.path: path = os.path.abspath(path) if new_path == path: return sys.path.append(new_path)
[ "def", "_append_path", "(", "new_path", ")", ":", "# type: (str) -> None", "for", "path", "in", "sys", ".", "path", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "new_path", "==", "path", ":", "return", "sys", ".", "path"...
Given a path string, append it to sys.path
[ "Given", "a", "path", "string", "append", "it", "to", "sys", ".", "path" ]
af219aecfecb1ef3130165121dcad6d2e1a269b7
https://github.com/albertyw/syspath/blob/af219aecfecb1ef3130165121dcad6d2e1a269b7/syspath/syspath.py#L6-L12
train
50,403
albertyw/syspath
syspath/syspath.py
_caller_path
def _caller_path(index): # type: (int) -> str """ Get the caller's file path, by the index of the stack, does not work when the caller is stdin through a CLI python """ module = None stack = inspect.stack() while not module: if index >= len(stack): raise RuntimeError("Cannot find import path") frame = stack[index] module = inspect.getmodule(frame[0]) index += 1 filename = module.__file__ path = os.path.dirname(os.path.realpath(filename)) return path
python
def _caller_path(index): # type: (int) -> str """ Get the caller's file path, by the index of the stack, does not work when the caller is stdin through a CLI python """ module = None stack = inspect.stack() while not module: if index >= len(stack): raise RuntimeError("Cannot find import path") frame = stack[index] module = inspect.getmodule(frame[0]) index += 1 filename = module.__file__ path = os.path.dirname(os.path.realpath(filename)) return path
[ "def", "_caller_path", "(", "index", ")", ":", "# type: (int) -> str", "module", "=", "None", "stack", "=", "inspect", ".", "stack", "(", ")", "while", "not", "module", ":", "if", "index", ">=", "len", "(", "stack", ")", ":", "raise", "RuntimeError", "("...
Get the caller's file path, by the index of the stack, does not work when the caller is stdin through a CLI python
[ "Get", "the", "caller", "s", "file", "path", "by", "the", "index", "of", "the", "stack", "does", "not", "work", "when", "the", "caller", "is", "stdin", "through", "a", "CLI", "python" ]
af219aecfecb1ef3130165121dcad6d2e1a269b7
https://github.com/albertyw/syspath/blob/af219aecfecb1ef3130165121dcad6d2e1a269b7/syspath/syspath.py#L15-L30
train
50,404
albertyw/syspath
syspath/syspath.py
get_current_path
def get_current_path(index=2):
    # type: (int) -> str
    """Get the caller's path to sys.path.

    If the caller is a CLI through stdin, the current working directory
    is used instead.
    """
    try:
        return _caller_path(index)
    except RuntimeError:
        # No resolvable module on the stack (e.g. interactive stdin)
        return os.getcwd()
python
def get_current_path(index=2): # type: (int) -> str """ Get the caller's path to sys.path If the caller is a CLI through stdin, the current working directory is used """ try: path = _caller_path(index) except RuntimeError: path = os.getcwd() return path
[ "def", "get_current_path", "(", "index", "=", "2", ")", ":", "# type: (int) -> str", "try", ":", "path", "=", "_caller_path", "(", "index", ")", "except", "RuntimeError", ":", "path", "=", "os", ".", "getcwd", "(", ")", "return", "path" ]
Get the caller's path to sys.path If the caller is a CLI through stdin, the current working directory is used
[ "Get", "the", "caller", "s", "path", "to", "sys", ".", "path", "If", "the", "caller", "is", "a", "CLI", "through", "stdin", "the", "current", "working", "directory", "is", "used" ]
af219aecfecb1ef3130165121dcad6d2e1a269b7
https://github.com/albertyw/syspath/blob/af219aecfecb1ef3130165121dcad6d2e1a269b7/syspath/syspath.py#L33-L42
train
50,405
albertyw/syspath
syspath/syspath.py
get_git_root
def get_git_root(index=3):
    # type: (int) -> str
    """Get the path of the git root directory of the caller's file.

    Raises a RuntimeError if a git repository cannot be found.
    """
    path = get_current_path(index=index)
    # Walk up until a directory containing ".git" is found.
    while not os.path.isdir(os.path.join(path, '.git')):
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding a repository.
            raise RuntimeError("Cannot find git root")
        path = parent
    return path
python
def get_git_root(index=3): # type: (int) -> str """ Get the path of the git root directory of the caller's file Raises a RuntimeError if a git repository cannot be found """ path = get_current_path(index=index) while True: git_path = os.path.join(path, '.git') if os.path.isdir(git_path): return path if os.path.dirname(path) == path: raise RuntimeError("Cannot find git root") path = os.path.split(path)[0]
[ "def", "get_git_root", "(", "index", "=", "3", ")", ":", "# type: (int) -> str", "path", "=", "get_current_path", "(", "index", "=", "index", ")", "while", "True", ":", "git_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'.git'", ")", "...
Get the path of the git root directory of the caller's file Raises a RuntimeError if a git repository cannot be found
[ "Get", "the", "path", "of", "the", "git", "root", "directory", "of", "the", "caller", "s", "file", "Raises", "a", "RuntimeError", "if", "a", "git", "repository", "cannot", "be", "found" ]
af219aecfecb1ef3130165121dcad6d2e1a269b7
https://github.com/albertyw/syspath/blob/af219aecfecb1ef3130165121dcad6d2e1a269b7/syspath/syspath.py#L55-L67
train
50,406
albertyw/syspath
syspath/syspath.py
get_parent_path
def get_parent_path(index=2):
    # type: (int) -> str
    """Get the caller's parent path to sys.path.

    If the caller is a CLI through stdin, the parent of the current
    working directory is used.
    """
    try:
        base = _caller_path(index)
    except RuntimeError:
        # No resolvable module on the stack (e.g. interactive stdin)
        base = os.getcwd()
    return os.path.abspath(os.path.join(base, os.pardir))
python
def get_parent_path(index=2): # type: (int) -> str """ Get the caller's parent path to sys.path If the caller is a CLI through stdin, the parent of the current working directory is used """ try: path = _caller_path(index) except RuntimeError: path = os.getcwd() path = os.path.abspath(os.path.join(path, os.pardir)) return path
[ "def", "get_parent_path", "(", "index", "=", "2", ")", ":", "# type: (int) -> str", "try", ":", "path", "=", "_caller_path", "(", "index", ")", "except", "RuntimeError", ":", "path", "=", "os", ".", "getcwd", "(", ")", "path", "=", "os", ".", "path", "...
Get the caller's parent path to sys.path If the caller is a CLI through stdin, the parent of the current working directory is used
[ "Get", "the", "caller", "s", "parent", "path", "to", "sys", ".", "path", "If", "the", "caller", "is", "a", "CLI", "through", "stdin", "the", "parent", "of", "the", "current", "working", "directory", "is", "used" ]
af219aecfecb1ef3130165121dcad6d2e1a269b7
https://github.com/albertyw/syspath/blob/af219aecfecb1ef3130165121dcad6d2e1a269b7/syspath/syspath.py#L80-L91
train
50,407
deployed/django-emailtemplates
emailtemplates/email.py
EmailFromTemplate.send_email
def send_email(self, send_to, attachment_paths=None, fail_silently=True, *args, **kwargs):
    """
    Sends email to recipient based on self object parameters.

    @param fail_silently: When it's False, msg.send() will raise an
        smtplib.SMTPException if an error occurs.
    @param send_to: recipient email
    @param args: additional args passed to EmailMessage
    @param kwargs: kwargs passed to EmailMessage
    @param attachment_paths: paths to attachments as received by django
        EmailMessage.attach_file(path) method
    @return: number of sent messages
    """
    msg = self.get_message_object(send_to, attachment_paths, *args, **kwargs)
    msg.content_subtype = self.content_subtype
    try:
        self.sent = msg.send()
    # Fixed: "except SMTPException, e:" is Python-2-only syntax (a
    # SyntaxError on Python 3); "as" works on Python 2.6+ and 3.
    except SMTPException as e:
        if not fail_silently:
            raise
        logger.error(u'Problem sending email to %s: %s', send_to, e)
    return self.sent
python
def send_email(self, send_to, attachment_paths=None, fail_silently=True, *args, **kwargs): """ Sends email to recipient based on self object parameters. @param fail_silently: When it’s False, msg.send() will raise an smtplib.SMTPException if an error occurs. @param send_to: recipient email @param args: additional args passed to EmailMessage @param kwargs: kwargs passed to EmailMessage @param attachment_paths: paths to attachments as received by django EmailMessage.attach_file(path) method @return: number of sent messages """ msg = self.get_message_object(send_to, attachment_paths, *args, **kwargs) msg.content_subtype = self.content_subtype try: self.sent = msg.send() except SMTPException, e: if not fail_silently: raise logger.error(u'Problem sending email to %s: %s', send_to, e) return self.sent
[ "def", "send_email", "(", "self", ",", "send_to", ",", "attachment_paths", "=", "None", ",", "fail_silently", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", "=", "self", ".", "get_message_object", "(", "send_to", ",", "attachmen...
Sends email to recipient based on self object parameters. @param fail_silently: When it’s False, msg.send() will raise an smtplib.SMTPException if an error occurs. @param send_to: recipient email @param args: additional args passed to EmailMessage @param kwargs: kwargs passed to EmailMessage @param attachment_paths: paths to attachments as received by django EmailMessage.attach_file(path) method @return: number of sent messages
[ "Sends", "email", "to", "recipient", "based", "on", "self", "object", "parameters", "." ]
0e95139989dbcf7e624153ddcd7b5b66b48eb6eb
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/email.py#L127-L148
train
50,408
commontk/ctk-cli
ctk_cli/module.py
_tag
def _tag(element): """Return element.tag with xmlns stripped away.""" tag = element.tag if tag[0] == "{": uri, tag = tag[1:].split("}") return tag
python
def _tag(element): """Return element.tag with xmlns stripped away.""" tag = element.tag if tag[0] == "{": uri, tag = tag[1:].split("}") return tag
[ "def", "_tag", "(", "element", ")", ":", "tag", "=", "element", ".", "tag", "if", "tag", "[", "0", "]", "==", "\"{\"", ":", "uri", ",", "tag", "=", "tag", "[", "1", ":", "]", ".", "split", "(", "\"}\"", ")", "return", "tag" ]
Return element.tag with xmlns stripped away.
[ "Return", "element", ".", "tag", "with", "xmlns", "stripped", "away", "." ]
ddd8de62b586491ad6e6750133cc1f0e11f37b11
https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/module.py#L23-L28
train
50,409
commontk/ctk-cli
ctk_cli/module.py
_uriPrefix
def _uriPrefix(element): """Return xmlns prefix of the given element.""" i = element.tag.find('}') if i < 0: return "" return element.tag[:i+1]
python
def _uriPrefix(element): """Return xmlns prefix of the given element.""" i = element.tag.find('}') if i < 0: return "" return element.tag[:i+1]
[ "def", "_uriPrefix", "(", "element", ")", ":", "i", "=", "element", ".", "tag", ".", "find", "(", "'}'", ")", "if", "i", "<", "0", ":", "return", "\"\"", "return", "element", ".", "tag", "[", ":", "i", "+", "1", "]" ]
Return xmlns prefix of the given element.
[ "Return", "xmlns", "prefix", "of", "the", "given", "element", "." ]
ddd8de62b586491ad6e6750133cc1f0e11f37b11
https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/module.py#L30-L35
train
50,410
commontk/ctk-cli
ctk_cli/module.py
CLIParameter.parseValue
def parseValue(self, value):
    """Parse the given value and return result."""
    if self.isVector():
        # Comma-separated vector: convert each component individually.
        return [self._pythonType(item) for item in value.split(',')]
    if self.typ == 'boolean':
        return _parseBool(value)
    return self._pythonType(value)
python
def parseValue(self, value): """Parse the given value and return result.""" if self.isVector(): return list(map(self._pythonType, value.split(','))) if self.typ == 'boolean': return _parseBool(value) return self._pythonType(value)
[ "def", "parseValue", "(", "self", ",", "value", ")", ":", "if", "self", ".", "isVector", "(", ")", ":", "return", "list", "(", "map", "(", "self", ".", "_pythonType", ",", "value", ".", "split", "(", "','", ")", ")", ")", "if", "self", ".", "typ"...
Parse the given value and return result.
[ "Parse", "the", "given", "value", "and", "return", "result", "." ]
ddd8de62b586491ad6e6750133cc1f0e11f37b11
https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/module.py#L259-L265
train
50,411
commontk/ctk-cli
ctk_cli/module.py
CLIParameter.defaultExtension
def defaultExtension(self):
    """Return default extension for this parameter type, checked against
    supported fileExtensions.  If the default extension is not within
    `fileExtensions`, return the first supported extension."""
    preferred = self.EXTERNAL_TYPES[self.typ]
    # Without an explicit whitelist, or when the preferred extension is
    # whitelisted, the type's default wins.
    if not self.fileExtensions or preferred in self.fileExtensions:
        return preferred
    return self.fileExtensions[0]
python
def defaultExtension(self): """Return default extension for this parameter type, checked against supported fileExtensions. If the default extension is not within `fileExtensions`, return the first supported extension.""" result = self.EXTERNAL_TYPES[self.typ] if not self.fileExtensions: return result if result in self.fileExtensions: return result return self.fileExtensions[0]
[ "def", "defaultExtension", "(", "self", ")", ":", "result", "=", "self", ".", "EXTERNAL_TYPES", "[", "self", ".", "typ", "]", "if", "not", "self", ".", "fileExtensions", ":", "return", "result", "if", "result", "in", "self", ".", "fileExtensions", ":", "...
Return default extension for this parameter type, checked against supported fileExtensions. If the default extension is not within `fileExtensions`, return the first supported extension.
[ "Return", "default", "extension", "for", "this", "parameter", "type", "checked", "against", "supported", "fileExtensions", ".", "If", "the", "default", "extension", "is", "not", "within", "fileExtensions", "return", "the", "first", "supported", "extension", "." ]
ddd8de62b586491ad6e6750133cc1f0e11f37b11
https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/module.py#L291-L301
train
50,412
oscarlazoarjona/fast
fast/symbolic.py
define_symbol
def define_symbol(name, open_brace, comma, i, j, close_brace,
                  variables, **kwds):
    r"""Define a nice symbol with matrix indices.

    >>> name = "rho"
    >>> from sympy import symbols
    >>> t, x, y, z = symbols("t, x, y, z", positive=True)
    >>> variables = [t, x, y, z]
    >>> open_brace = ""
    >>> comma = ""
    >>> close_brace = ""
    >>> i = 0
    >>> j = 1
    >>> f = define_symbol(name, open_brace, comma, i, j, close_brace,
    ...                   variables, positive=True)
    >>> print f
    rho12(t, x, y, z)

    """
    # Build the 1-based "name{i,j}" label once for both branches.
    label = name + open_brace + str(i + 1) + comma + str(j + 1) + close_brace
    if variables is None:
        return Symbol(label, **kwds)
    # With explicit variables, produce a function of those variables.
    return Function(label, **kwds)(*variables)
python
def define_symbol(name, open_brace, comma, i, j, close_brace, variables, **kwds): r"""Define a nice symbol with matrix indices. >>> name = "rho" >>> from sympy import symbols >>> t, x, y, z = symbols("t, x, y, z", positive=True) >>> variables = [t, x, y, z] >>> open_brace = "" >>> comma = "" >>> close_brace = "" >>> i = 0 >>> j = 1 >>> f = define_symbol(name, open_brace, comma, i, j, close_brace, ... variables, positive=True) >>> print f rho12(t, x, y, z) """ if variables is None: return Symbol(name+open_brace+str(i+1)+comma+str(j+1) + close_brace, **kwds) else: return Function(name+open_brace+str(i+1)+comma+str(j+1) + close_brace, **kwds)(*variables)
[ "def", "define_symbol", "(", "name", ",", "open_brace", ",", "comma", ",", "i", ",", "j", ",", "close_brace", ",", "variables", ",", "*", "*", "kwds", ")", ":", "if", "variables", "is", "None", ":", "return", "Symbol", "(", "name", "+", "open_brace", ...
r"""Define a nice symbol with matrix indices. >>> name = "rho" >>> from sympy import symbols >>> t, x, y, z = symbols("t, x, y, z", positive=True) >>> variables = [t, x, y, z] >>> open_brace = "" >>> comma = "" >>> close_brace = "" >>> i = 0 >>> j = 1 >>> f = define_symbol(name, open_brace, comma, i, j, close_brace, ... variables, positive=True) >>> print f rho12(t, x, y, z)
[ "r", "Define", "a", "nice", "symbol", "with", "matrix", "indices", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L59-L83
train
50,413
oscarlazoarjona/fast
fast/symbolic.py
cartesian_to_helicity
def cartesian_to_helicity(vector, numeric=False):
    r"""This function takes vectors from the cartesian basis to the helicity
    basis.

    For instance, we can check what are the vectors of the helicity basis.

    >>> from sympy import pi
    >>> em=polarization_vector(phi=0, theta= 0, alpha=0, beta=-pi/8,p= 1)
    >>> em
    Matrix([
    [   sqrt(2)/2],
    [-sqrt(2)*I/2],
    [           0]])
    >>> cartesian_to_helicity(em)
    Matrix([
    [ 0],
    [ 0],
    [-1]])

    >>> e0=polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta=0,p=1)
    >>> e0
    Matrix([
    [0],
    [0],
    [1]])
    >>> cartesian_to_helicity(e0)
    Matrix([
    [0],
    [1],
    [0]])

    >>> ep=polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p= 1)
    >>> ep
    Matrix([
    [  -sqrt(2)/2],
    [-sqrt(2)*I/2],
    [           0]])
    >>> cartesian_to_helicity(ep)
    Matrix([
    [-1],
    [ 0],
    [ 0]])

    Note that vectors in the helicity basis are built in a weird way by
    convention:

    .. math::
        \vec{a} = -a_{+1}\vec{e}_{-1} +a_0\vec{e}_0 -a_{-1}\vec{e}_{+1}

    >>> from sympy import symbols
    >>> am,a0,ap = symbols("am a0 ap")
    >>> a=-ap*em +a0*e0 -am*ep
    >>> a
    Matrix([
    [   sqrt(2)*am/2 - sqrt(2)*ap/2],
    [sqrt(2)*I*am/2 + sqrt(2)*I*ap/2],
    [                             a0]])
    >>> cartesian_to_helicity(a).expand()
    Matrix([
    [am],
    [a0],
    [ap]])

    We can also convert a numeric array

    >>> r =[[[0.0, 1.0],
    ...      [1.0, 0.0]],
    ...     [[0.0, -1j],
    ...      [ 1j, 0.0]],
    ...     [[1.0, 0.0],
    ...      [0.0,-1.0]]]
    >>> cartesian_to_helicity(r, numeric=True)
    array([[[ 0.    +0.j,  0.    +0.j],
            [ 1.4142+0.j,  0.    +0.j]],
    <BLANKLINE>
           [[ 1.    +0.j,  0.    +0.j],
            [ 0.    +0.j, -1.    +0.j]],
    <BLANKLINE>
           [[-0.    +0.j, -1.4142+0.j],
            [-0.    +0.j, -0.    +0.j]]])

    """
    if numeric:
        # Numeric path: coerce the three cartesian components to numpy
        # arrays (nparray/npsqrt are the module's numpy aliases) and build
        # the helicity components with 1j as the imaginary unit.
        vector = list(vector)
        vector[0] = nparray(vector[0])
        vector[1] = nparray(vector[1])
        vector[2] = nparray(vector[2])
        v = [(vector[0]-1j*vector[1])/npsqrt(2),
             vector[2],
             -(vector[0]+1j*vector[1])/npsqrt(2)]
        v = nparray(v)
    else:
        # Symbolic path: same combination using sympy's I and sqrt.
        v = [(vector[0]-I*vector[1])/sqrt(2),
             vector[2],
             -(vector[0]+I*vector[1])/sqrt(2)]
    # When the components are themselves matrices/arrays (a vector
    # operator), return the raw list/array; otherwise wrap the three
    # components into a sympy column Matrix.
    if type(vector[0]) in [type(Matrix([1, 0])), type(nparray([1, 0]))]:
        return v
    else:
        return Matrix(v)
python
def cartesian_to_helicity(vector, numeric=False): r"""This function takes vectors from the cartesian basis to the helicity basis. For instance, we can check what are the vectors of the helicity basis. >>> from sympy import pi >>> em=polarization_vector(phi=0, theta= 0, alpha=0, beta=-pi/8,p= 1) >>> em Matrix([ [ sqrt(2)/2], [-sqrt(2)*I/2], [ 0]]) >>> cartesian_to_helicity(em) Matrix([ [ 0], [ 0], [-1]]) >>> e0=polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta=0,p=1) >>> e0 Matrix([ [0], [0], [1]]) >>> cartesian_to_helicity(e0) Matrix([ [0], [1], [0]]) >>> ep=polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p= 1) >>> ep Matrix([ [ -sqrt(2)/2], [-sqrt(2)*I/2], [ 0]]) >>> cartesian_to_helicity(ep) Matrix([ [-1], [ 0], [ 0]]) Note that vectors in the helicity basis are built in a weird way by convention: .. math:: \vec{a} = -a_{+1}\vec{e}_{-1} +a_0\vec{e}_0 -a_{-1}\vec{e}_{+1} >>> from sympy import symbols >>> am,a0,ap = symbols("am a0 ap") >>> a=-ap*em +a0*e0 -am*ep >>> a Matrix([ [ sqrt(2)*am/2 - sqrt(2)*ap/2], [sqrt(2)*I*am/2 + sqrt(2)*I*ap/2], [ a0]]) >>> cartesian_to_helicity(a).expand() Matrix([ [am], [a0], [ap]]) We can also convert a numeric array >>> r =[[[0.0, 1.0], ... [1.0, 0.0]], ... [[0.0, -1j], ... [ 1j, 0.0]], ... [[1.0, 0.0], ... [0.0,-1.0]]] >>> cartesian_to_helicity(r, numeric=True) array([[[ 0. +0.j, 0. +0.j], [ 1.4142+0.j, 0. +0.j]], <BLANKLINE> [[ 1. +0.j, 0. +0.j], [ 0. +0.j, -1. +0.j]], <BLANKLINE> [[-0. +0.j, -1.4142+0.j], [-0. +0.j, -0. +0.j]]]) """ if numeric: vector = list(vector) vector[0] = nparray(vector[0]) vector[1] = nparray(vector[1]) vector[2] = nparray(vector[2]) v = [(vector[0]-1j*vector[1])/npsqrt(2), vector[2], -(vector[0]+1j*vector[1])/npsqrt(2)] v = nparray(v) else: v = [(vector[0]-I*vector[1])/sqrt(2), vector[2], -(vector[0]+I*vector[1])/sqrt(2)] if type(vector[0]) in [type(Matrix([1, 0])), type(nparray([1, 0]))]: return v else: return Matrix(v)
[ "def", "cartesian_to_helicity", "(", "vector", ",", "numeric", "=", "False", ")", ":", "if", "numeric", ":", "vector", "=", "list", "(", "vector", ")", "vector", "[", "0", "]", "=", "nparray", "(", "vector", "[", "0", "]", ")", "vector", "[", "1", ...
r"""This function takes vectors from the cartesian basis to the helicity basis. For instance, we can check what are the vectors of the helicity basis. >>> from sympy import pi >>> em=polarization_vector(phi=0, theta= 0, alpha=0, beta=-pi/8,p= 1) >>> em Matrix([ [ sqrt(2)/2], [-sqrt(2)*I/2], [ 0]]) >>> cartesian_to_helicity(em) Matrix([ [ 0], [ 0], [-1]]) >>> e0=polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta=0,p=1) >>> e0 Matrix([ [0], [0], [1]]) >>> cartesian_to_helicity(e0) Matrix([ [0], [1], [0]]) >>> ep=polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p= 1) >>> ep Matrix([ [ -sqrt(2)/2], [-sqrt(2)*I/2], [ 0]]) >>> cartesian_to_helicity(ep) Matrix([ [-1], [ 0], [ 0]]) Note that vectors in the helicity basis are built in a weird way by convention: .. math:: \vec{a} = -a_{+1}\vec{e}_{-1} +a_0\vec{e}_0 -a_{-1}\vec{e}_{+1} >>> from sympy import symbols >>> am,a0,ap = symbols("am a0 ap") >>> a=-ap*em +a0*e0 -am*ep >>> a Matrix([ [ sqrt(2)*am/2 - sqrt(2)*ap/2], [sqrt(2)*I*am/2 + sqrt(2)*I*ap/2], [ a0]]) >>> cartesian_to_helicity(a).expand() Matrix([ [am], [a0], [ap]]) We can also convert a numeric array >>> r =[[[0.0, 1.0], ... [1.0, 0.0]], ... [[0.0, -1j], ... [ 1j, 0.0]], ... [[1.0, 0.0], ... [0.0,-1.0]]] >>> cartesian_to_helicity(r, numeric=True) array([[[ 0. +0.j, 0. +0.j], [ 1.4142+0.j, 0. +0.j]], <BLANKLINE> [[ 1. +0.j, 0. +0.j], [ 0. +0.j, -1. +0.j]], <BLANKLINE> [[-0. +0.j, -1.4142+0.j], [-0. +0.j, -0. +0.j]]])
[ "r", "This", "function", "takes", "vectors", "from", "the", "cartesian", "basis", "to", "the", "helicity", "basis", ".", "For", "instance", "we", "can", "check", "what", "are", "the", "vectors", "of", "the", "helicity", "basis", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L300-L400
train
50,414
oscarlazoarjona/fast
fast/symbolic.py
vector_element
def vector_element(r, i, j):
    r"""Extract a matrix element of a vector operator.

    >>> r = define_r_components(2)
    >>> vector_element(r, 1, 0)
    Matrix([
    [x_{21}],
    [y_{21}],
    [z_{21}]])

    """
    # One (i, j) entry from each of the three cartesian components.
    components = [r[component][i, j] for component in range(3)]
    return Matrix(components)
python
def vector_element(r, i, j): r"""Extract an matrix element of a vector operator. >>> r = define_r_components(2) >>> vector_element(r, 1, 0) Matrix([ [x_{21}], [y_{21}], [z_{21}]]) """ return Matrix([r[p][i, j] for p in range(3)])
[ "def", "vector_element", "(", "r", ",", "i", ",", "j", ")", ":", "return", "Matrix", "(", "[", "r", "[", "p", "]", "[", "i", ",", "j", "]", "for", "p", "in", "range", "(", "3", ")", "]", ")" ]
r"""Extract an matrix element of a vector operator. >>> r = define_r_components(2) >>> vector_element(r, 1, 0) Matrix([ [x_{21}], [y_{21}], [z_{21}]])
[ "r", "Extract", "an", "matrix", "element", "of", "a", "vector", "operator", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L690-L701
train
50,415
oscarlazoarjona/fast
fast/symbolic.py
define_frequencies
def define_frequencies(Ne, explicitly_antisymmetric=False):
    u"""Define all frequencies omega_level, omega, gamma.

    >>> from sympy import pprint
    >>> pprint(define_frequencies(2), use_unicode=True)
    ⎛          ⎡ 0   ω₁₂⎤  ⎡ 0   γ₁₂⎤⎞
    ⎜[ω₁, ω₂], ⎢        ⎥, ⎢        ⎥⎟
    ⎝          ⎣ω₂₁   0 ⎦  ⎣γ₂₁   0 ⎦⎠

    We can make these matrices explicitly antisymmetric.

    >>> pprint(define_frequencies(2, explicitly_antisymmetric=True),
    ...        use_unicode=True)
    ⎛          ⎡ 0    -ω₂₁⎤  ⎡ 0    -γ₂₁⎤⎞
    ⎜[ω₁, ω₂], ⎢          ⎥, ⎢          ⎥⎟
    ⎝          ⎣ω₂₁    0  ⎦  ⎣γ₂₁    0  ⎦⎠

    """
    # One real level frequency per state: omega_1 ... omega_Ne.
    omega_level = [Symbol('omega_'+str(i+1), real=True)
                   for i in range(Ne)]

    # For two-digit indices use LaTeX-style braces and a comma so the
    # subscripts remain unambiguous (e.g. \omega_{10,2}); single-digit
    # indices can be concatenated directly (omega_21).
    if Ne > 9:
        opening = "\\"
        comma = ","
        open_brace = "{"
        close_brace = "}"
    else:
        opening = r""
        comma = ""
        open_brace = ""
        close_brace = ""

    omega = []; gamma = []
    for i in range(Ne):
        row_omega = []; row_gamma = []
        for j in range(Ne):
            if i == j:
                # Diagonal: no transition frequency or decay rate.
                om = 0; ga = 0
            elif i > j:
                # Lower triangle: positive symbols omega_{i,j}, gamma_{i,j}.
                om = Symbol(opening+r"omega_" +
                            open_brace+str(i+1)+comma+str(j+1) +
                            close_brace, real=True)
                ga = Symbol(opening+r"gamma_" +
                            open_brace+str(i+1)+comma+str(j+1) +
                            close_brace, real=True)
            elif explicitly_antisymmetric:
                # Upper triangle: reuse the lower-triangle symbol negated,
                # so the matrix is antisymmetric by construction.
                om = -Symbol(opening+r"omega_" +
                             open_brace+str(j+1)+comma+str(i+1) +
                             close_brace, real=True)
                ga = -Symbol(opening+r"gamma_" +
                             open_brace+str(j+1)+comma+str(i+1) +
                             close_brace, real=True)
            else:
                # Upper triangle: independent symbols omega_{i,j}, gamma_{i,j}.
                om = Symbol(opening+r"omega_" +
                            open_brace+str(i+1)+comma+str(j+1) +
                            close_brace, real=True)
                ga = Symbol(opening+r"gamma_" +
                            open_brace+str(i+1)+comma+str(j+1) +
                            close_brace, real=True)
            row_omega += [om]
            row_gamma += [ga]
        omega += [row_omega]
        gamma += [row_gamma]

    omega = Matrix(omega)
    gamma = Matrix(gamma)
    return omega_level, omega, gamma
python
def define_frequencies(Ne, explicitly_antisymmetric=False): u"""Define all frequencies omega_level, omega, gamma. >>> from sympy import pprint >>> pprint(define_frequencies(2), use_unicode=True) ⎛ ⎡ 0 ω₁₂⎤ ⎡ 0 γ₁₂⎤⎞ ⎜[ω₁, ω₂], ⎢ ⎥, ⎢ ⎥⎟ ⎝ ⎣ω₂₁ 0 ⎦ ⎣γ₂₁ 0 ⎦⎠ We can make these matrices explicitly antisymmetric. >>> pprint(define_frequencies(2, explicitly_antisymmetric=True), ... use_unicode=True) ⎛ ⎡ 0 -ω₂₁⎤ ⎡ 0 -γ₂₁⎤⎞ ⎜[ω₁, ω₂], ⎢ ⎥, ⎢ ⎥⎟ ⎝ ⎣ω₂₁ 0 ⎦ ⎣γ₂₁ 0 ⎦⎠ """ omega_level = [Symbol('omega_'+str(i+1), real=True) for i in range(Ne)] if Ne > 9: opening = "\\" comma = "," open_brace = "{" close_brace = "}" else: opening = r"" comma = "" open_brace = "" close_brace = "" omega = []; gamma = [] for i in range(Ne): row_omega = []; row_gamma = [] for j in range(Ne): if i == j: om = 0; ga = 0 elif i > j: om = Symbol(opening+r"omega_" + open_brace+str(i+1)+comma+str(j+1) + close_brace, real=True) ga = Symbol(opening+r"gamma_" + open_brace+str(i+1)+comma+str(j+1) + close_brace, real=True) elif explicitly_antisymmetric: om = -Symbol(opening+r"omega_" + open_brace+str(j+1)+comma+str(i+1) + close_brace, real=True) ga = -Symbol(opening+r"gamma_" + open_brace+str(j+1)+comma+str(i+1) + close_brace, real=True) else: om = Symbol(opening+r"omega_" + open_brace+str(i+1)+comma+str(j+1) + close_brace, real=True) ga = Symbol(opening+r"gamma_" + open_brace+str(i+1)+comma+str(j+1) + close_brace, real=True) row_omega += [om] row_gamma += [ga] omega += [row_omega] gamma += [row_gamma] omega = Matrix(omega) gamma = Matrix(gamma) return omega_level, omega, gamma
[ "def", "define_frequencies", "(", "Ne", ",", "explicitly_antisymmetric", "=", "False", ")", ":", "omega_level", "=", "[", "Symbol", "(", "'omega_'", "+", "str", "(", "i", "+", "1", ")", ",", "real", "=", "True", ")", "for", "i", "in", "range", "(", "...
u"""Define all frequencies omega_level, omega, gamma. >>> from sympy import pprint >>> pprint(define_frequencies(2), use_unicode=True) ⎛ ⎡ 0 ω₁₂⎤ ⎡ 0 γ₁₂⎤⎞ ⎜[ω₁, ω₂], ⎢ ⎥, ⎢ ⎥⎟ ⎝ ⎣ω₂₁ 0 ⎦ ⎣γ₂₁ 0 ⎦⎠ We can make these matrices explicitly antisymmetric. >>> pprint(define_frequencies(2, explicitly_antisymmetric=True), ... use_unicode=True) ⎛ ⎡ 0 -ω₂₁⎤ ⎡ 0 -γ₂₁⎤⎞ ⎜[ω₁, ω₂], ⎢ ⎥, ⎢ ⎥⎟ ⎝ ⎣ω₂₁ 0 ⎦ ⎣γ₂₁ 0 ⎦⎠
[ "u", "Define", "all", "frequencies", "omega_level", "omega", "gamma", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L704-L772
train
50,416
oscarlazoarjona/fast
fast/symbolic.py
lindblad_terms
def lindblad_terms(gamma, rho, Ne, verbose=1): u"""Return the Lindblad terms for decays gamma in matrix form. >>> from sympy import pprint >>> aux = define_frequencies(4, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> gamma = gamma.subs({gamma[2, 0]:0, gamma[3, 0]:0, gamma[3, 1]:0}) >>> pprint(gamma, use_unicode=True) ⎡ 0 -γ₂₁ 0 0 ⎤ ⎢ ⎥ ⎢γ₂₁ 0 -γ₃₂ 0 ⎥ ⎢ ⎥ ⎢ 0 γ₃₂ 0 -γ₄₃⎥ ⎢ ⎥ ⎣ 0 0 γ₄₃ 0 ⎦ >>> rho = define_density_matrix(4) >>> pprint(lindblad_terms(gamma, rho, 4), use_unicode=True) ⎡ -γ₂₁⋅ρ₁₂ -γ₃₂⋅ρ₁₃ -γ₄₃⋅ρ₁₄ ⎤ ⎢ γ₂₁⋅ρ₂₂ ───────── ───────── ───────── ⎥ ⎢ 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₂₁⋅ρ₂₁ γ₂₁⋅ρ₂₃ γ₃₂⋅ρ₂₃ γ₂₁⋅ρ₂₄ γ₄₃⋅ρ₂₄⎥ ⎢───────── -γ₂₁⋅ρ₂₂ + γ₃₂⋅ρ₃₃ - ─────── - ─────── - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₃₂⋅ρ₃₁ γ₂₁⋅ρ₃₂ γ₃₂⋅ρ₃₂ γ₃₂⋅ρ₃₄ γ₄₃⋅ρ₃₄⎥ ⎢───────── - ─────── - ─────── -γ₃₂⋅ρ₃₃ + γ₄₃⋅ρ₄₄ - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₄₃⋅ρ₄₁ γ₂₁⋅ρ₄₂ γ₄₃⋅ρ₄₂ γ₃₂⋅ρ₄₃ γ₄₃⋅ρ₄₃ ⎥ ⎢───────── - ─────── - ─────── - ─────── - ─────── -γ₄₃⋅ρ₄₄ ⎥ ⎣ 2 2 2 2 2 ⎦ Notice that there are more terms than simply adding a decay gamma_ij*rho_ij/2 for each coherence. """ # We count the necessary Lindblad operators. Nterms = 0 for i in range(Ne): for j in range(i): if gamma[i, j] != 0: Nterms += 1 L = zeros(Ne) counter = 0 t0 = time() for i in range(Ne): for j in range(i): if gamma[i, j] != 0: counter += 1 sig = ket(j+1, Ne)*bra(i+1, Ne) L += gamma[i, j]*lindblad_operator(sig, rho) tn = time() if tn-t0 > 1: aux = "Calculated up to i={}, j={}, or {}/{} = {:2.2f} %." if verbose > 0: print(aux.format(i, j, counter, Nterms, float(counter+1)/Nterms*100)) t0 = tn return L
python
def lindblad_terms(gamma, rho, Ne, verbose=1): u"""Return the Lindblad terms for decays gamma in matrix form. >>> from sympy import pprint >>> aux = define_frequencies(4, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> gamma = gamma.subs({gamma[2, 0]:0, gamma[3, 0]:0, gamma[3, 1]:0}) >>> pprint(gamma, use_unicode=True) ⎡ 0 -γ₂₁ 0 0 ⎤ ⎢ ⎥ ⎢γ₂₁ 0 -γ₃₂ 0 ⎥ ⎢ ⎥ ⎢ 0 γ₃₂ 0 -γ₄₃⎥ ⎢ ⎥ ⎣ 0 0 γ₄₃ 0 ⎦ >>> rho = define_density_matrix(4) >>> pprint(lindblad_terms(gamma, rho, 4), use_unicode=True) ⎡ -γ₂₁⋅ρ₁₂ -γ₃₂⋅ρ₁₃ -γ₄₃⋅ρ₁₄ ⎤ ⎢ γ₂₁⋅ρ₂₂ ───────── ───────── ───────── ⎥ ⎢ 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₂₁⋅ρ₂₁ γ₂₁⋅ρ₂₃ γ₃₂⋅ρ₂₃ γ₂₁⋅ρ₂₄ γ₄₃⋅ρ₂₄⎥ ⎢───────── -γ₂₁⋅ρ₂₂ + γ₃₂⋅ρ₃₃ - ─────── - ─────── - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₃₂⋅ρ₃₁ γ₂₁⋅ρ₃₂ γ₃₂⋅ρ₃₂ γ₃₂⋅ρ₃₄ γ₄₃⋅ρ₃₄⎥ ⎢───────── - ─────── - ─────── -γ₃₂⋅ρ₃₃ + γ₄₃⋅ρ₄₄ - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₄₃⋅ρ₄₁ γ₂₁⋅ρ₄₂ γ₄₃⋅ρ₄₂ γ₃₂⋅ρ₄₃ γ₄₃⋅ρ₄₃ ⎥ ⎢───────── - ─────── - ─────── - ─────── - ─────── -γ₄₃⋅ρ₄₄ ⎥ ⎣ 2 2 2 2 2 ⎦ Notice that there are more terms than simply adding a decay gamma_ij*rho_ij/2 for each coherence. """ # We count the necessary Lindblad operators. Nterms = 0 for i in range(Ne): for j in range(i): if gamma[i, j] != 0: Nterms += 1 L = zeros(Ne) counter = 0 t0 = time() for i in range(Ne): for j in range(i): if gamma[i, j] != 0: counter += 1 sig = ket(j+1, Ne)*bra(i+1, Ne) L += gamma[i, j]*lindblad_operator(sig, rho) tn = time() if tn-t0 > 1: aux = "Calculated up to i={}, j={}, or {}/{} = {:2.2f} %." if verbose > 0: print(aux.format(i, j, counter, Nterms, float(counter+1)/Nterms*100)) t0 = tn return L
[ "def", "lindblad_terms", "(", "gamma", ",", "rho", ",", "Ne", ",", "verbose", "=", "1", ")", ":", "# We count the necessary Lindblad operators.", "Nterms", "=", "0", "for", "i", "in", "range", "(", "Ne", ")", ":", "for", "j", "in", "range", "(", "i", "...
u"""Return the Lindblad terms for decays gamma in matrix form. >>> from sympy import pprint >>> aux = define_frequencies(4, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> gamma = gamma.subs({gamma[2, 0]:0, gamma[3, 0]:0, gamma[3, 1]:0}) >>> pprint(gamma, use_unicode=True) ⎡ 0 -γ₂₁ 0 0 ⎤ ⎢ ⎥ ⎢γ₂₁ 0 -γ₃₂ 0 ⎥ ⎢ ⎥ ⎢ 0 γ₃₂ 0 -γ₄₃⎥ ⎢ ⎥ ⎣ 0 0 γ₄₃ 0 ⎦ >>> rho = define_density_matrix(4) >>> pprint(lindblad_terms(gamma, rho, 4), use_unicode=True) ⎡ -γ₂₁⋅ρ₁₂ -γ₃₂⋅ρ₁₃ -γ₄₃⋅ρ₁₄ ⎤ ⎢ γ₂₁⋅ρ₂₂ ───────── ───────── ───────── ⎥ ⎢ 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₂₁⋅ρ₂₁ γ₂₁⋅ρ₂₃ γ₃₂⋅ρ₂₃ γ₂₁⋅ρ₂₄ γ₄₃⋅ρ₂₄⎥ ⎢───────── -γ₂₁⋅ρ₂₂ + γ₃₂⋅ρ₃₃ - ─────── - ─────── - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₃₂⋅ρ₃₁ γ₂₁⋅ρ₃₂ γ₃₂⋅ρ₃₂ γ₃₂⋅ρ₃₄ γ₄₃⋅ρ₃₄⎥ ⎢───────── - ─────── - ─────── -γ₃₂⋅ρ₃₃ + γ₄₃⋅ρ₄₄ - ─────── - ───────⎥ ⎢ 2 2 2 2 2 ⎥ ⎢ ⎥ ⎢-γ₄₃⋅ρ₄₁ γ₂₁⋅ρ₄₂ γ₄₃⋅ρ₄₂ γ₃₂⋅ρ₄₃ γ₄₃⋅ρ₄₃ ⎥ ⎢───────── - ─────── - ─────── - ─────── - ─────── -γ₄₃⋅ρ₄₄ ⎥ ⎣ 2 2 2 2 2 ⎦ Notice that there are more terms than simply adding a decay gamma_ij*rho_ij/2 for each coherence.
[ "u", "Return", "the", "Lindblad", "terms", "for", "decays", "gamma", "in", "matrix", "form", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L923-L983
train
50,417
oscarlazoarjona/fast
fast/symbolic.py
define_rho_vector
def define_rho_vector(rho, Ne): u"""Define the vectorized density matrix. >>> from sympy import pprint >>> rho = define_density_matrix(3) >>> pprint(define_rho_vector(rho, 3), use_unicode=True) ⎡ ρ₂₂ ⎤ ⎢ ⎥ ⎢ ρ₃₃ ⎥ ⎢ ⎥ ⎢re(ρ₂₁)⎥ ⎢ ⎥ ⎢re(ρ₃₁)⎥ ⎢ ⎥ ⎢re(ρ₃₂)⎥ ⎢ ⎥ ⎢im(ρ₂₁)⎥ ⎢ ⎥ ⎢im(ρ₃₁)⎥ ⎢ ⎥ ⎣im(ρ₃₂)⎦ """ rho_vect = [] for mu in range(1, Ne**2): i, j, s = IJ(mu, Ne) i = i-1; j = j-1 rho_vect += [part_symbolic(rho[i, j], s)] return Matrix(rho_vect)
python
def define_rho_vector(rho, Ne): u"""Define the vectorized density matrix. >>> from sympy import pprint >>> rho = define_density_matrix(3) >>> pprint(define_rho_vector(rho, 3), use_unicode=True) ⎡ ρ₂₂ ⎤ ⎢ ⎥ ⎢ ρ₃₃ ⎥ ⎢ ⎥ ⎢re(ρ₂₁)⎥ ⎢ ⎥ ⎢re(ρ₃₁)⎥ ⎢ ⎥ ⎢re(ρ₃₂)⎥ ⎢ ⎥ ⎢im(ρ₂₁)⎥ ⎢ ⎥ ⎢im(ρ₃₁)⎥ ⎢ ⎥ ⎣im(ρ₃₂)⎦ """ rho_vect = [] for mu in range(1, Ne**2): i, j, s = IJ(mu, Ne) i = i-1; j = j-1 rho_vect += [part_symbolic(rho[i, j], s)] return Matrix(rho_vect)
[ "def", "define_rho_vector", "(", "rho", ",", "Ne", ")", ":", "rho_vect", "=", "[", "]", "for", "mu", "in", "range", "(", "1", ",", "Ne", "**", "2", ")", ":", "i", ",", "j", ",", "s", "=", "IJ", "(", "mu", ",", "Ne", ")", "i", "=", "i", "-...
u"""Define the vectorized density matrix. >>> from sympy import pprint >>> rho = define_density_matrix(3) >>> pprint(define_rho_vector(rho, 3), use_unicode=True) ⎡ ρ₂₂ ⎤ ⎢ ⎥ ⎢ ρ₃₃ ⎥ ⎢ ⎥ ⎢re(ρ₂₁)⎥ ⎢ ⎥ ⎢re(ρ₃₁)⎥ ⎢ ⎥ ⎢re(ρ₃₂)⎥ ⎢ ⎥ ⎢im(ρ₂₁)⎥ ⎢ ⎥ ⎢im(ρ₃₁)⎥ ⎢ ⎥ ⎣im(ρ₃₂)⎦
[ "u", "Define", "the", "vectorized", "density", "matrix", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1021-L1049
train
50,418
oscarlazoarjona/fast
fast/symbolic.py
calculate_A_b
def calculate_A_b(eqs, unfolding, verbose=0): u"""Calculate the equations in matrix form. >>> from sympy import symbols, pprint, I >>> rho = define_density_matrix(2, explicitly_hermitian=True, ... normalized=True) >>> Omega = symbols("Omega") >>> delta = symbols("delta", real=True) >>> hbar = symbols("hbar", positive=True) >>> H = hbar*Matrix([[0, Omega.conjugate()/2], [Omega/2, -delta]]) >>> Ne = 2 >>> aux = define_frequencies(Ne, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> eqs = I/hbar*(rho*H-H*rho) + lindblad_terms(gamma, rho, 2) >>> from fast import Unfolding >>> unfolding = Unfolding(Ne, True, True, True) >>> A, b = calculate_A_b(eqs, unfolding) >>> pprint(A, use_unicode=True) ⎡ -γ₂₁ im(Ω) -re(Ω)⎤ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢-im(Ω) ───── -δ ⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢re(Ω) δ ───── ⎥ ⎣ 2 ⎦ >>> pprint(b, use_unicode=True) ⎡ 0 ⎤ ⎢ ⎥ ⎢-im(Ω) ⎥ ⎢───────⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ re(Ω) ⎥ ⎢ ───── ⎥ ⎣ 2 ⎦ """ Ne = unfolding.Ne Nrho = unfolding.Nrho lower_triangular = unfolding.lower_triangular rho = define_density_matrix(Ne, explicitly_hermitian=lower_triangular, normalized=unfolding.normalized) rho_vect = unfolding(rho) if unfolding.real: ss_comp = {rho[i, j]: re(rho[i, j])+I*im(rho[i, j]) for j in range(Ne) for i in range(Ne)} A = []; b = [] for mu in range(Nrho): s, i, j = unfolding.IJ(mu) if verbose > 0: print mu eq = part_symbolic(eqs[i, j].subs(ss_comp), s) eq_new = 0 row = [] for nu in range(Nrho): variable = rho_vect[nu] coefficient = Derivative(eq, variable).doit() row += [coefficient] eq_new += coefficient*variable b += [-(eq-eq_new).expand()] A += [row] A = Matrix(A); b = Matrix(b) return A, b
python
def calculate_A_b(eqs, unfolding, verbose=0): u"""Calculate the equations in matrix form. >>> from sympy import symbols, pprint, I >>> rho = define_density_matrix(2, explicitly_hermitian=True, ... normalized=True) >>> Omega = symbols("Omega") >>> delta = symbols("delta", real=True) >>> hbar = symbols("hbar", positive=True) >>> H = hbar*Matrix([[0, Omega.conjugate()/2], [Omega/2, -delta]]) >>> Ne = 2 >>> aux = define_frequencies(Ne, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> eqs = I/hbar*(rho*H-H*rho) + lindblad_terms(gamma, rho, 2) >>> from fast import Unfolding >>> unfolding = Unfolding(Ne, True, True, True) >>> A, b = calculate_A_b(eqs, unfolding) >>> pprint(A, use_unicode=True) ⎡ -γ₂₁ im(Ω) -re(Ω)⎤ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢-im(Ω) ───── -δ ⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢re(Ω) δ ───── ⎥ ⎣ 2 ⎦ >>> pprint(b, use_unicode=True) ⎡ 0 ⎤ ⎢ ⎥ ⎢-im(Ω) ⎥ ⎢───────⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ re(Ω) ⎥ ⎢ ───── ⎥ ⎣ 2 ⎦ """ Ne = unfolding.Ne Nrho = unfolding.Nrho lower_triangular = unfolding.lower_triangular rho = define_density_matrix(Ne, explicitly_hermitian=lower_triangular, normalized=unfolding.normalized) rho_vect = unfolding(rho) if unfolding.real: ss_comp = {rho[i, j]: re(rho[i, j])+I*im(rho[i, j]) for j in range(Ne) for i in range(Ne)} A = []; b = [] for mu in range(Nrho): s, i, j = unfolding.IJ(mu) if verbose > 0: print mu eq = part_symbolic(eqs[i, j].subs(ss_comp), s) eq_new = 0 row = [] for nu in range(Nrho): variable = rho_vect[nu] coefficient = Derivative(eq, variable).doit() row += [coefficient] eq_new += coefficient*variable b += [-(eq-eq_new).expand()] A += [row] A = Matrix(A); b = Matrix(b) return A, b
[ "def", "calculate_A_b", "(", "eqs", ",", "unfolding", ",", "verbose", "=", "0", ")", ":", "Ne", "=", "unfolding", ".", "Ne", "Nrho", "=", "unfolding", ".", "Nrho", "lower_triangular", "=", "unfolding", ".", "lower_triangular", "rho", "=", "define_density_mat...
u"""Calculate the equations in matrix form. >>> from sympy import symbols, pprint, I >>> rho = define_density_matrix(2, explicitly_hermitian=True, ... normalized=True) >>> Omega = symbols("Omega") >>> delta = symbols("delta", real=True) >>> hbar = symbols("hbar", positive=True) >>> H = hbar*Matrix([[0, Omega.conjugate()/2], [Omega/2, -delta]]) >>> Ne = 2 >>> aux = define_frequencies(Ne, explicitly_antisymmetric=True) >>> omega_level, omega, gamma = aux >>> eqs = I/hbar*(rho*H-H*rho) + lindblad_terms(gamma, rho, 2) >>> from fast import Unfolding >>> unfolding = Unfolding(Ne, True, True, True) >>> A, b = calculate_A_b(eqs, unfolding) >>> pprint(A, use_unicode=True) ⎡ -γ₂₁ im(Ω) -re(Ω)⎤ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢-im(Ω) ───── -δ ⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ -γ₂₁ ⎥ ⎢re(Ω) δ ───── ⎥ ⎣ 2 ⎦ >>> pprint(b, use_unicode=True) ⎡ 0 ⎤ ⎢ ⎥ ⎢-im(Ω) ⎥ ⎢───────⎥ ⎢ 2 ⎥ ⎢ ⎥ ⎢ re(Ω) ⎥ ⎢ ───── ⎥ ⎣ 2 ⎦
[ "u", "Calculate", "the", "equations", "in", "matrix", "form", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1052-L1124
train
50,419
oscarlazoarjona/fast
fast/symbolic.py
phase_transformation
def phase_transformation(Ne, Nl, r, Lij, omega_laser, phase): r"""Obtain a phase transformation to eliminate explicit time dependence. >>> Ne = 2 """ ph = find_phase_transformation(Ne, Nl, r, Lij) return {phase[i]: sum([ph[i][j]*omega_laser[j] for j in range(Nl)]) for i in range(Ne)}
python
def phase_transformation(Ne, Nl, r, Lij, omega_laser, phase): r"""Obtain a phase transformation to eliminate explicit time dependence. >>> Ne = 2 """ ph = find_phase_transformation(Ne, Nl, r, Lij) return {phase[i]: sum([ph[i][j]*omega_laser[j] for j in range(Nl)]) for i in range(Ne)}
[ "def", "phase_transformation", "(", "Ne", ",", "Nl", ",", "r", ",", "Lij", ",", "omega_laser", ",", "phase", ")", ":", "ph", "=", "find_phase_transformation", "(", "Ne", ",", "Nl", ",", "r", ",", "Lij", ")", "return", "{", "phase", "[", "i", "]", "...
r"""Obtain a phase transformation to eliminate explicit time dependence. >>> Ne = 2
[ "r", "Obtain", "a", "phase", "transformation", "to", "eliminate", "explicit", "time", "dependence", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1127-L1136
train
50,420
oscarlazoarjona/fast
fast/symbolic.py
dot
def dot(a, b): r"""Dot product of two 3d vectors.""" if isinstance(a, Mul): a = a.expand() avect = 1 aivect = -1 for ai, fact in enumerate(a.args): if isinstance(fact, Vector3D): avect = fact aivect = ai break acoef = a.args[:aivect] + a.args[aivect+1:] acoef = Mul(*acoef) return acoef*dot(avect, b) if isinstance(b, Mul): b = b.expand() bvect = 1 bivect = -1 for bi, fact in enumerate(b.args): if isinstance(fact, Vector3D): bvect = fact bivect = bi break bcoef = b.args[:bivect] + b.args[bivect+1:] bcoef = Mul(*bcoef) return bcoef*dot(a, bvect) if isinstance(a, Vector3D) and isinstance(b, Vector3D): return DotProduct(a, b) if hasattr(a, "shape") and hasattr(b, "shape"): return cartesian_dot_product(a, b) print a, b, type(a), type(b), print isinstance(a, Vector3D), isinstance(b, Vector3D) raise NotImplementedError("could not catch these instances in dot!")
python
def dot(a, b): r"""Dot product of two 3d vectors.""" if isinstance(a, Mul): a = a.expand() avect = 1 aivect = -1 for ai, fact in enumerate(a.args): if isinstance(fact, Vector3D): avect = fact aivect = ai break acoef = a.args[:aivect] + a.args[aivect+1:] acoef = Mul(*acoef) return acoef*dot(avect, b) if isinstance(b, Mul): b = b.expand() bvect = 1 bivect = -1 for bi, fact in enumerate(b.args): if isinstance(fact, Vector3D): bvect = fact bivect = bi break bcoef = b.args[:bivect] + b.args[bivect+1:] bcoef = Mul(*bcoef) return bcoef*dot(a, bvect) if isinstance(a, Vector3D) and isinstance(b, Vector3D): return DotProduct(a, b) if hasattr(a, "shape") and hasattr(b, "shape"): return cartesian_dot_product(a, b) print a, b, type(a), type(b), print isinstance(a, Vector3D), isinstance(b, Vector3D) raise NotImplementedError("could not catch these instances in dot!")
[ "def", "dot", "(", "a", ",", "b", ")", ":", "if", "isinstance", "(", "a", ",", "Mul", ")", ":", "a", "=", "a", ".", "expand", "(", ")", "avect", "=", "1", "aivect", "=", "-", "1", "for", "ai", ",", "fact", "in", "enumerate", "(", "a", ".", ...
r"""Dot product of two 3d vectors.
[ "r", "Dot", "product", "of", "two", "3d", "vectors", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1390-L1428
train
50,421
oscarlazoarjona/fast
fast/symbolic.py
cross
def cross(a, b): r"""Cross product of two 3d vectors.""" if isinstance(a, Mul): a = a.expand() avect = 1 aivect = -1 for ai, fact in enumerate(a.args): if isinstance(fact, Vector3D): avect = fact aivect = ai break acoef = a.args[:aivect] + a.args[aivect+1:] acoef = Mul(*acoef) return acoef*cross(avect, b) if isinstance(b, Mul): b = b.expand() bvect = 1 bivect = -1 for bi, fact in enumerate(b.args): if isinstance(fact, Vector3D): bvect = fact bivect = bi break bcoef = b.args[:bivect] + b.args[bivect+1:] bcoef = Mul(*bcoef) return bcoef*cross(a, bvect) if isinstance(a, Vector3D) and isinstance(b, Vector3D): return CrossProduct(a, b)
python
def cross(a, b): r"""Cross product of two 3d vectors.""" if isinstance(a, Mul): a = a.expand() avect = 1 aivect = -1 for ai, fact in enumerate(a.args): if isinstance(fact, Vector3D): avect = fact aivect = ai break acoef = a.args[:aivect] + a.args[aivect+1:] acoef = Mul(*acoef) return acoef*cross(avect, b) if isinstance(b, Mul): b = b.expand() bvect = 1 bivect = -1 for bi, fact in enumerate(b.args): if isinstance(fact, Vector3D): bvect = fact bivect = bi break bcoef = b.args[:bivect] + b.args[bivect+1:] bcoef = Mul(*bcoef) return bcoef*cross(a, bvect) if isinstance(a, Vector3D) and isinstance(b, Vector3D): return CrossProduct(a, b)
[ "def", "cross", "(", "a", ",", "b", ")", ":", "if", "isinstance", "(", "a", ",", "Mul", ")", ":", "a", "=", "a", ".", "expand", "(", ")", "avect", "=", "1", "aivect", "=", "-", "1", "for", "ai", ",", "fact", "in", "enumerate", "(", "a", "."...
r"""Cross product of two 3d vectors.
[ "r", "Cross", "product", "of", "two", "3d", "vectors", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/symbolic.py#L1431-L1462
train
50,422
tilde-lab/tilde
tilde/core/settings.py
write_settings
def write_settings(settings): ''' Saves user's settings @returns True on success @returns False on failure ''' if not os.access(DATA_DIR, os.W_OK): return False try: f = open(DATA_DIR + os.sep + SETTINGS_FILE, 'w') f.writelines(json.dumps(settings, indent=0)) f.close() os.chmod(os.path.abspath(DATA_DIR + os.sep + SETTINGS_FILE), 0o777) # to avoid (or create?) IO problems with multiple users except IOError: return False else: return True
python
def write_settings(settings): ''' Saves user's settings @returns True on success @returns False on failure ''' if not os.access(DATA_DIR, os.W_OK): return False try: f = open(DATA_DIR + os.sep + SETTINGS_FILE, 'w') f.writelines(json.dumps(settings, indent=0)) f.close() os.chmod(os.path.abspath(DATA_DIR + os.sep + SETTINGS_FILE), 0o777) # to avoid (or create?) IO problems with multiple users except IOError: return False else: return True
[ "def", "write_settings", "(", "settings", ")", ":", "if", "not", "os", ".", "access", "(", "DATA_DIR", ",", "os", ".", "W_OK", ")", ":", "return", "False", "try", ":", "f", "=", "open", "(", "DATA_DIR", "+", "os", ".", "sep", "+", "SETTINGS_FILE", ...
Saves user's settings @returns True on success @returns False on failure
[ "Saves", "user", "s", "settings" ]
59841578b3503075aa85c76f9ae647b3ff92b0a3
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/core/settings.py#L109-L124
train
50,423
hammerlab/stancache
stancache/utils.py
is_field_unique_by_group
def is_field_unique_by_group(df, field_col, group_col): ''' Determine if field is constant by group in df ''' def num_unique(x): return len(pd.unique(x)) num_distinct = df.groupby(group_col)[field_col].agg(num_unique) return all(num_distinct == 1)
python
def is_field_unique_by_group(df, field_col, group_col): ''' Determine if field is constant by group in df ''' def num_unique(x): return len(pd.unique(x)) num_distinct = df.groupby(group_col)[field_col].agg(num_unique) return all(num_distinct == 1)
[ "def", "is_field_unique_by_group", "(", "df", ",", "field_col", ",", "group_col", ")", ":", "def", "num_unique", "(", "x", ")", ":", "return", "len", "(", "pd", ".", "unique", "(", "x", ")", ")", "num_distinct", "=", "df", ".", "groupby", "(", "group_c...
Determine if field is constant by group in df
[ "Determine", "if", "field", "is", "constant", "by", "group", "in", "df" ]
22f2548731d0960c14c0d41f4f64e418d3f22e4c
https://github.com/hammerlab/stancache/blob/22f2548731d0960c14c0d41f4f64e418d3f22e4c/stancache/utils.py#L36-L42
train
50,424
hammerlab/stancache
stancache/utils.py
_list_files_in_path
def _list_files_in_path(path, pattern="*.stan"): """ indexes a directory of stan files returns as dictionary containing contents of files """ results = [] for dirname, subdirs, files in os.walk(path): for name in files: if fnmatch(name, pattern): results.append(os.path.join(dirname, name)) return(results)
python
def _list_files_in_path(path, pattern="*.stan"): """ indexes a directory of stan files returns as dictionary containing contents of files """ results = [] for dirname, subdirs, files in os.walk(path): for name in files: if fnmatch(name, pattern): results.append(os.path.join(dirname, name)) return(results)
[ "def", "_list_files_in_path", "(", "path", ",", "pattern", "=", "\"*.stan\"", ")", ":", "results", "=", "[", "]", "for", "dirname", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "name", "in", "files", ":", "if",...
indexes a directory of stan files returns as dictionary containing contents of files
[ "indexes", "a", "directory", "of", "stan", "files", "returns", "as", "dictionary", "containing", "contents", "of", "files" ]
22f2548731d0960c14c0d41f4f64e418d3f22e4c
https://github.com/hammerlab/stancache/blob/22f2548731d0960c14c0d41f4f64e418d3f22e4c/stancache/utils.py#L45-L56
train
50,425
tilde-lab/tilde
tilde/classifiers/perovskites.py
generate_random_perovskite
def generate_random_perovskite(lat=None): ''' This generates a random valid perovskite structure in ASE format. Useful for testing. Binary and organic perovskites are not considered. ''' if not lat: lat = round(random.uniform(3.5, Perovskite_tilting.OCTAHEDRON_BOND_LENGTH_LIMIT*2), 3) A_site = random.choice(Perovskite_Structure.A) B_site = random.choice(Perovskite_Structure.B) Ci_site = random.choice(Perovskite_Structure.C) Cii_site = random.choice(Perovskite_Structure.C) while covalent_radii[chemical_symbols.index(A_site)] - \ covalent_radii[chemical_symbols.index(B_site)] < 0.05 or \ covalent_radii[chemical_symbols.index(A_site)] - \ covalent_radii[chemical_symbols.index(B_site)] > 0.5: A_site = random.choice(Perovskite_Structure.A) B_site = random.choice(Perovskite_Structure.B) return crystal( [A_site, B_site, Ci_site, Cii_site], [(0.5, 0.25, 0.0), (0.0, 0.0, 0.0), (0.0, 0.25, 0.0), (0.25, 0.0, 0.75)], spacegroup=62, cellpar=[lat*math.sqrt(2), 2*lat, lat*math.sqrt(2), 90, 90, 90] )
python
def generate_random_perovskite(lat=None): ''' This generates a random valid perovskite structure in ASE format. Useful for testing. Binary and organic perovskites are not considered. ''' if not lat: lat = round(random.uniform(3.5, Perovskite_tilting.OCTAHEDRON_BOND_LENGTH_LIMIT*2), 3) A_site = random.choice(Perovskite_Structure.A) B_site = random.choice(Perovskite_Structure.B) Ci_site = random.choice(Perovskite_Structure.C) Cii_site = random.choice(Perovskite_Structure.C) while covalent_radii[chemical_symbols.index(A_site)] - \ covalent_radii[chemical_symbols.index(B_site)] < 0.05 or \ covalent_radii[chemical_symbols.index(A_site)] - \ covalent_radii[chemical_symbols.index(B_site)] > 0.5: A_site = random.choice(Perovskite_Structure.A) B_site = random.choice(Perovskite_Structure.B) return crystal( [A_site, B_site, Ci_site, Cii_site], [(0.5, 0.25, 0.0), (0.0, 0.0, 0.0), (0.0, 0.25, 0.0), (0.25, 0.0, 0.75)], spacegroup=62, cellpar=[lat*math.sqrt(2), 2*lat, lat*math.sqrt(2), 90, 90, 90] )
[ "def", "generate_random_perovskite", "(", "lat", "=", "None", ")", ":", "if", "not", "lat", ":", "lat", "=", "round", "(", "random", ".", "uniform", "(", "3.5", ",", "Perovskite_tilting", ".", "OCTAHEDRON_BOND_LENGTH_LIMIT", "*", "2", ")", ",", "3", ")", ...
This generates a random valid perovskite structure in ASE format. Useful for testing. Binary and organic perovskites are not considered.
[ "This", "generates", "a", "random", "valid", "perovskite", "structure", "in", "ASE", "format", ".", "Useful", "for", "testing", ".", "Binary", "and", "organic", "perovskites", "are", "not", "considered", "." ]
59841578b3503075aa85c76f9ae647b3ff92b0a3
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/classifiers/perovskites.py#L126-L151
train
50,426
alexwlchan/specktre
src/specktre/cli.py
check_positive_integer
def check_positive_integer(name, value): """Check a value is a positive integer. Returns the value if so, raises ValueError otherwise. """ try: value = int(value) is_positive = (value > 0) except ValueError: raise ValueError('%s should be an integer; got %r' % (name, value)) if is_positive: return value else: raise ValueError('%s should be positive; got %r' % (name, value))
python
def check_positive_integer(name, value): """Check a value is a positive integer. Returns the value if so, raises ValueError otherwise. """ try: value = int(value) is_positive = (value > 0) except ValueError: raise ValueError('%s should be an integer; got %r' % (name, value)) if is_positive: return value else: raise ValueError('%s should be positive; got %r' % (name, value))
[ "def", "check_positive_integer", "(", "name", ",", "value", ")", ":", "try", ":", "value", "=", "int", "(", "value", ")", "is_positive", "=", "(", "value", ">", "0", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'%s should be an integer; got...
Check a value is a positive integer. Returns the value if so, raises ValueError otherwise.
[ "Check", "a", "value", "is", "a", "positive", "integer", "." ]
dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc
https://github.com/alexwlchan/specktre/blob/dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc/src/specktre/cli.py#L40-L55
train
50,427
alexwlchan/specktre
src/specktre/cli.py
check_color_input
def check_color_input(value): """Check a value is a valid colour input. Returns a parsed `RGBColor` instance if so, raises ValueError otherwise. """ value = value.lower() # Trim a leading hash if value.startswith('#'): value = value[1:] if len(value) != 6: raise ValueError( 'Color should be six hexadecimal digits, got %r (%s)' % (value, len(value))) if re.sub(r'[a-f0-9]', '', value): raise ValueError( 'Color should only contain hex characters, got %r' % value) red = int(value[0:2], base=16) green = int(value[2:4], base=16) blue = int(value[4:6], base=16) return RGBColor(red, green, blue)
python
def check_color_input(value): """Check a value is a valid colour input. Returns a parsed `RGBColor` instance if so, raises ValueError otherwise. """ value = value.lower() # Trim a leading hash if value.startswith('#'): value = value[1:] if len(value) != 6: raise ValueError( 'Color should be six hexadecimal digits, got %r (%s)' % (value, len(value))) if re.sub(r'[a-f0-9]', '', value): raise ValueError( 'Color should only contain hex characters, got %r' % value) red = int(value[0:2], base=16) green = int(value[2:4], base=16) blue = int(value[4:6], base=16) return RGBColor(red, green, blue)
[ "def", "check_color_input", "(", "value", ")", ":", "value", "=", "value", ".", "lower", "(", ")", "# Trim a leading hash", "if", "value", ".", "startswith", "(", "'#'", ")", ":", "value", "=", "value", "[", "1", ":", "]", "if", "len", "(", "value", ...
Check a value is a valid colour input. Returns a parsed `RGBColor` instance if so, raises ValueError otherwise.
[ "Check", "a", "value", "is", "a", "valid", "colour", "input", "." ]
dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc
https://github.com/alexwlchan/specktre/blob/dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc/src/specktre/cli.py#L58-L82
train
50,428
tilde-lab/tilde
tilde/apps/perovskite_tilting/perovskite_tilting.py
Perovskite_tilting.get_octahedra
def get_octahedra(self, atoms, periodicity=3): ''' Extract octahedra as lists of sequence numbers of corner atoms ''' octahedra = [] for n, i in enumerate(atoms): found = [] if i.symbol in Perovskite_Structure.B: for m, j in enumerate(self.virtual_atoms): if j.symbol in Perovskite_Structure.C and self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT: found.append(m) if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]): octahedra.append([n, found]) if not len(octahedra): raise ModuleError("Cannot extract valid octahedra: not enough corner atoms found!") return octahedra
python
def get_octahedra(self, atoms, periodicity=3): ''' Extract octahedra as lists of sequence numbers of corner atoms ''' octahedra = [] for n, i in enumerate(atoms): found = [] if i.symbol in Perovskite_Structure.B: for m, j in enumerate(self.virtual_atoms): if j.symbol in Perovskite_Structure.C and self.virtual_atoms.get_distance(n, m) <= self.OCTAHEDRON_BOND_LENGTH_LIMIT: found.append(m) if (periodicity == 3 and len(found) == 6) or (periodicity == 2 and len(found) in [5, 6]): octahedra.append([n, found]) if not len(octahedra): raise ModuleError("Cannot extract valid octahedra: not enough corner atoms found!") return octahedra
[ "def", "get_octahedra", "(", "self", ",", "atoms", ",", "periodicity", "=", "3", ")", ":", "octahedra", "=", "[", "]", "for", "n", ",", "i", "in", "enumerate", "(", "atoms", ")", ":", "found", "=", "[", "]", "if", "i", ".", "symbol", "in", "Perov...
Extract octahedra as lists of sequence numbers of corner atoms
[ "Extract", "octahedra", "as", "lists", "of", "sequence", "numbers", "of", "corner", "atoms" ]
59841578b3503075aa85c76f9ae647b3ff92b0a3
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/apps/perovskite_tilting/perovskite_tilting.py#L141-L157
train
50,429
tilde-lab/tilde
tilde/apps/perovskite_tilting/perovskite_tilting.py
Perovskite_tilting.get_tiltplane
def get_tiltplane(self, sequence): ''' Extract the main tilting plane basing on Z coordinate ''' sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z) in_plane = [] for i in range(0, len(sequence)-4): if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \ abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \ abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE: in_plane = [sequence[j] for j in range(i, i+4)] return in_plane
python
def get_tiltplane(self, sequence): ''' Extract the main tilting plane basing on Z coordinate ''' sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z) in_plane = [] for i in range(0, len(sequence)-4): if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \ abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \ abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE: in_plane = [sequence[j] for j in range(i, i+4)] return in_plane
[ "def", "get_tiltplane", "(", "self", ",", "sequence", ")", ":", "sequence", "=", "sorted", "(", "sequence", ",", "key", "=", "lambda", "x", ":", "self", ".", "virtual_atoms", "[", "x", "]", ".", "z", ")", "in_plane", "=", "[", "]", "for", "i", "in"...
Extract the main tilting plane basing on Z coordinate
[ "Extract", "the", "main", "tilting", "plane", "basing", "on", "Z", "coordinate" ]
59841578b3503075aa85c76f9ae647b3ff92b0a3
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/apps/perovskite_tilting/perovskite_tilting.py#L159-L170
train
50,430
deployed/django-emailtemplates
emailtemplates/registry.py
EmailTemplateRegistry.register
def register(self, path, help_text=None, help_context=None): """ Registers email template. Example usage: email_templates.register('hello_template.html', help_text=u'Hello template', help_context={'username': u'Name of user in hello expression'}) :param path: Template file path. It will become immutable registry lookup key. :param help_text: Help text to describe template in admin site :param help_context: Dictionary of possible keys used in the context and description of their content `help_context` items values may be strings or tuples of two strings. If strings, then email template preview will use variable names to fill context, otherwise the second tuple element will become example value. If an email template is already registered, this will raise AlreadyRegistered. """ if path in self._registry: raise AlreadyRegistered('The template %s is already registered' % path) self._registry[path] = RegistrationItem(path, help_text, help_context) logger.debug("Registered email template %s", path)
python
def register(self, path, help_text=None, help_context=None): """ Registers email template. Example usage: email_templates.register('hello_template.html', help_text=u'Hello template', help_context={'username': u'Name of user in hello expression'}) :param path: Template file path. It will become immutable registry lookup key. :param help_text: Help text to describe template in admin site :param help_context: Dictionary of possible keys used in the context and description of their content `help_context` items values may be strings or tuples of two strings. If strings, then email template preview will use variable names to fill context, otherwise the second tuple element will become example value. If an email template is already registered, this will raise AlreadyRegistered. """ if path in self._registry: raise AlreadyRegistered('The template %s is already registered' % path) self._registry[path] = RegistrationItem(path, help_text, help_context) logger.debug("Registered email template %s", path)
[ "def", "register", "(", "self", ",", "path", ",", "help_text", "=", "None", ",", "help_context", "=", "None", ")", ":", "if", "path", "in", "self", ".", "_registry", ":", "raise", "AlreadyRegistered", "(", "'The template %s is already registered'", "%", "path"...
Registers email template. Example usage: email_templates.register('hello_template.html', help_text=u'Hello template', help_context={'username': u'Name of user in hello expression'}) :param path: Template file path. It will become immutable registry lookup key. :param help_text: Help text to describe template in admin site :param help_context: Dictionary of possible keys used in the context and description of their content `help_context` items values may be strings or tuples of two strings. If strings, then email template preview will use variable names to fill context, otherwise the second tuple element will become example value. If an email template is already registered, this will raise AlreadyRegistered.
[ "Registers", "email", "template", "." ]
0e95139989dbcf7e624153ddcd7b5b66b48eb6eb
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/registry.py#L84-L104
train
50,431
deployed/django-emailtemplates
emailtemplates/registry.py
EmailTemplateRegistry.get_registration
def get_registration(self, path): """ Returns registration item for specified path. If an email template is not registered, this will raise NotRegistered. """ if not self.is_registered(path): raise NotRegistered("Email template not registered") return self._registry[path]
python
def get_registration(self, path): """ Returns registration item for specified path. If an email template is not registered, this will raise NotRegistered. """ if not self.is_registered(path): raise NotRegistered("Email template not registered") return self._registry[path]
[ "def", "get_registration", "(", "self", ",", "path", ")", ":", "if", "not", "self", ".", "is_registered", "(", "path", ")", ":", "raise", "NotRegistered", "(", "\"Email template not registered\"", ")", "return", "self", ".", "_registry", "[", "path", "]" ]
Returns registration item for specified path. If an email template is not registered, this will raise NotRegistered.
[ "Returns", "registration", "item", "for", "specified", "path", "." ]
0e95139989dbcf7e624153ddcd7b5b66b48eb6eb
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/registry.py#L109-L117
train
50,432
deployed/django-emailtemplates
emailtemplates/registry.py
EmailTemplateRegistry.get_form_help_text
def get_form_help_text(self, path): """ Returns text that can be used as form help text for creating email templates. """ try: form_help_text = self.get_registration(path).as_form_help_text() except NotRegistered: form_help_text = u"" return form_help_text
python
def get_form_help_text(self, path): """ Returns text that can be used as form help text for creating email templates. """ try: form_help_text = self.get_registration(path).as_form_help_text() except NotRegistered: form_help_text = u"" return form_help_text
[ "def", "get_form_help_text", "(", "self", ",", "path", ")", ":", "try", ":", "form_help_text", "=", "self", ".", "get_registration", "(", "path", ")", ".", "as_form_help_text", "(", ")", "except", "NotRegistered", ":", "form_help_text", "=", "u\"\"", "return",...
Returns text that can be used as form help text for creating email templates.
[ "Returns", "text", "that", "can", "be", "used", "as", "form", "help", "text", "for", "creating", "email", "templates", "." ]
0e95139989dbcf7e624153ddcd7b5b66b48eb6eb
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/registry.py#L137-L145
train
50,433
myth/pepper8
pepper8/generator.py
HtmlGenerator.report_build_messages
def report_build_messages(self): """ Checks environment variables to see whether pepper8 is run under a build agent such as TeamCity and performs the adequate actions to report statistics. Will not perform any action if HTML output is written to OUTPUT_FILE and not stdout. Currently only supports TeamCity. :return: A list of build message strings destined for stdout """ if os.getenv('TEAMCITY_VERSION'): tc_build_message_warning = "##teamcity[buildStatisticValue key='pepper8warnings' value='{}']\n" tc_build_message_error = "##teamcity[buildStatisticValue key='pepper8errors' value='{}']\n" stdout.write(tc_build_message_warning.format(self.total_warnings)) stdout.write(tc_build_message_error.format(self.total_errors)) stdout.flush()
python
def report_build_messages(self): """ Checks environment variables to see whether pepper8 is run under a build agent such as TeamCity and performs the adequate actions to report statistics. Will not perform any action if HTML output is written to OUTPUT_FILE and not stdout. Currently only supports TeamCity. :return: A list of build message strings destined for stdout """ if os.getenv('TEAMCITY_VERSION'): tc_build_message_warning = "##teamcity[buildStatisticValue key='pepper8warnings' value='{}']\n" tc_build_message_error = "##teamcity[buildStatisticValue key='pepper8errors' value='{}']\n" stdout.write(tc_build_message_warning.format(self.total_warnings)) stdout.write(tc_build_message_error.format(self.total_errors)) stdout.flush()
[ "def", "report_build_messages", "(", "self", ")", ":", "if", "os", ".", "getenv", "(", "'TEAMCITY_VERSION'", ")", ":", "tc_build_message_warning", "=", "\"##teamcity[buildStatisticValue key='pepper8warnings' value='{}']\\n\"", "tc_build_message_error", "=", "\"##teamcity[buildS...
Checks environment variables to see whether pepper8 is run under a build agent such as TeamCity and performs the adequate actions to report statistics. Will not perform any action if HTML output is written to OUTPUT_FILE and not stdout. Currently only supports TeamCity. :return: A list of build message strings destined for stdout
[ "Checks", "environment", "variables", "to", "see", "whether", "pepper8", "is", "run", "under", "a", "build", "agent", "such", "as", "TeamCity", "and", "performs", "the", "adequate", "actions", "to", "report", "statistics", "." ]
98ffed4089241d8d3c1048995bc6777a2f3abdda
https://github.com/myth/pepper8/blob/98ffed4089241d8d3c1048995bc6777a2f3abdda/pepper8/generator.py#L129-L146
train
50,434
oscarlazoarjona/fast
fast/bloch.py
phase_transformation
def phase_transformation(Ne, Nl, rm, xi, return_equations=False): """Returns a phase transformation theta_i. The phase transformation is defined in a way such that theta1 + omega_level1 = 0. >>> xi = np.zeros((1, 2, 2)) >>> xi[0, 1, 0] = 1.0 >>> xi[0, 0, 1] = 1.0 >>> rm = np.zeros((3, 2, 2)) >>> rm[0, 1, 0] = 1.0 >>> rm[1, 1, 0] = 1.0 >>> rm[2, 1, 0] = 1.0 >>> phase_transformation(2, 1, rm, xi) [-omega_1, -omega_1 - varpi_1] """ # We first define the needed variables E0, omega_laser = define_laser_variables(Nl) theta = [Symbol('theta'+str(i+1)) for i in range(Ne)] # We check for the case of xi being a list of matrices. if type(xi) == list: xi = np.array([[[xi[l][i, j] for j in range(Ne)] for i in range(Ne)] for l in range(Nl)]) # We find all the equations that the specified problem has to fulfil. eqs = [] for i in range(Ne): for j in range(0, i): if (rm[0][i, j] != 0) or \ (rm[1][i, j] != 0) or \ (rm[2][i, j] != 0): for l in range(Nl): if xi[l, i, j] == 1: eqs += [-omega_laser[l] + theta[j] - theta[i]] if return_equations: return eqs # We solve the system of equations. sol = sympy.solve(eqs, theta, dict=True) sol = sol[0] # We add any missing theta that may be left outside if the system is # under determined. extra_thetas = [] for i in range(Ne): if theta[i] not in sol.keys(): sol.update({theta[i]: theta[i]}) extra_thetas += [theta[i]] # We make the solution such that theta1 + omega_level1 = 0. omega_level, omega, gamma = define_frequencies(Ne) eq_crit = sol[theta[0]] + omega_level[0] ss = sympy.solve(eq_crit, extra_thetas[0])[0] ss = {extra_thetas[0]: ss} sol_simple = [sol[theta[i]].subs(ss) for i in range(Ne)] # sol = [] # for i in range(Ne): # soli = [] # for l in range(Nl): # soli += [sympy.diff(sol_simple[theta[i]], omega_laser[l])] # sol += [soli] return sol_simple
python
def phase_transformation(Ne, Nl, rm, xi, return_equations=False): """Returns a phase transformation theta_i. The phase transformation is defined in a way such that theta1 + omega_level1 = 0. >>> xi = np.zeros((1, 2, 2)) >>> xi[0, 1, 0] = 1.0 >>> xi[0, 0, 1] = 1.0 >>> rm = np.zeros((3, 2, 2)) >>> rm[0, 1, 0] = 1.0 >>> rm[1, 1, 0] = 1.0 >>> rm[2, 1, 0] = 1.0 >>> phase_transformation(2, 1, rm, xi) [-omega_1, -omega_1 - varpi_1] """ # We first define the needed variables E0, omega_laser = define_laser_variables(Nl) theta = [Symbol('theta'+str(i+1)) for i in range(Ne)] # We check for the case of xi being a list of matrices. if type(xi) == list: xi = np.array([[[xi[l][i, j] for j in range(Ne)] for i in range(Ne)] for l in range(Nl)]) # We find all the equations that the specified problem has to fulfil. eqs = [] for i in range(Ne): for j in range(0, i): if (rm[0][i, j] != 0) or \ (rm[1][i, j] != 0) or \ (rm[2][i, j] != 0): for l in range(Nl): if xi[l, i, j] == 1: eqs += [-omega_laser[l] + theta[j] - theta[i]] if return_equations: return eqs # We solve the system of equations. sol = sympy.solve(eqs, theta, dict=True) sol = sol[0] # We add any missing theta that may be left outside if the system is # under determined. extra_thetas = [] for i in range(Ne): if theta[i] not in sol.keys(): sol.update({theta[i]: theta[i]}) extra_thetas += [theta[i]] # We make the solution such that theta1 + omega_level1 = 0. omega_level, omega, gamma = define_frequencies(Ne) eq_crit = sol[theta[0]] + omega_level[0] ss = sympy.solve(eq_crit, extra_thetas[0])[0] ss = {extra_thetas[0]: ss} sol_simple = [sol[theta[i]].subs(ss) for i in range(Ne)] # sol = [] # for i in range(Ne): # soli = [] # for l in range(Nl): # soli += [sympy.diff(sol_simple[theta[i]], omega_laser[l])] # sol += [soli] return sol_simple
[ "def", "phase_transformation", "(", "Ne", ",", "Nl", ",", "rm", ",", "xi", ",", "return_equations", "=", "False", ")", ":", "# We first define the needed variables", "E0", ",", "omega_laser", "=", "define_laser_variables", "(", "Nl", ")", "theta", "=", "[", "S...
Returns a phase transformation theta_i. The phase transformation is defined in a way such that theta1 + omega_level1 = 0. >>> xi = np.zeros((1, 2, 2)) >>> xi[0, 1, 0] = 1.0 >>> xi[0, 0, 1] = 1.0 >>> rm = np.zeros((3, 2, 2)) >>> rm[0, 1, 0] = 1.0 >>> rm[1, 1, 0] = 1.0 >>> rm[2, 1, 0] = 1.0 >>> phase_transformation(2, 1, rm, xi) [-omega_1, -omega_1 - varpi_1]
[ "Returns", "a", "phase", "transformation", "theta_i", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L242-L311
train
50,435
oscarlazoarjona/fast
fast/bloch.py
define_simplification
def define_simplification(omega_level, xi, Nl): """Return a simplifying function, its inverse, and simplified frequencies. This implements an index iu that labels energies in a non-degenerate way. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> print(omega_levelu) [0.0, 100.0, 200.0, 300.0] >>> print(Neu) 4 >>> print(xiu) [[[0. 1. 0. 0.] [1. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] <BLANKLINE> [[0. 0. 1. 1.] [0. 0. 0. 0.] [1. 0. 0. 0.] [1. 0. 0. 0.]]] """ try: Ne = len(omega_level) except: Ne = omega_level.shape[0] ##################################### # 1 We calculate the symplifying functions. om = omega_level[0] iu = 0; Neu = 1 omega_levelu = [om] d = {}; di = {0: 0} for i in range(Ne): if omega_level[i] != om: iu += 1 om = omega_level[i] Neu += 1 omega_levelu += [om] di.update({iu: i}) d.update({i: iu}) def u(i): return d[i] def invu(iu): return di[iu] ##################################### # 2 We build the simplified xi. Neu = len(omega_levelu) xiu = np.array([[[xi[l, invu(i), invu(j)] for j in range(Neu)] for i in range(Neu)] for l in range(Nl)]) ##################################### return u, invu, omega_levelu, Neu, xiu
python
def define_simplification(omega_level, xi, Nl): """Return a simplifying function, its inverse, and simplified frequencies. This implements an index iu that labels energies in a non-degenerate way. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> print(omega_levelu) [0.0, 100.0, 200.0, 300.0] >>> print(Neu) 4 >>> print(xiu) [[[0. 1. 0. 0.] [1. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] <BLANKLINE> [[0. 0. 1. 1.] [0. 0. 0. 0.] [1. 0. 0. 0.] [1. 0. 0. 0.]]] """ try: Ne = len(omega_level) except: Ne = omega_level.shape[0] ##################################### # 1 We calculate the symplifying functions. om = omega_level[0] iu = 0; Neu = 1 omega_levelu = [om] d = {}; di = {0: 0} for i in range(Ne): if omega_level[i] != om: iu += 1 om = omega_level[i] Neu += 1 omega_levelu += [om] di.update({iu: i}) d.update({i: iu}) def u(i): return d[i] def invu(iu): return di[iu] ##################################### # 2 We build the simplified xi. Neu = len(omega_levelu) xiu = np.array([[[xi[l, invu(i), invu(j)] for j in range(Neu)] for i in range(Neu)] for l in range(Nl)]) ##################################### return u, invu, omega_levelu, Neu, xiu
[ "def", "define_simplification", "(", "omega_level", ",", "xi", ",", "Nl", ")", ":", "try", ":", "Ne", "=", "len", "(", "omega_level", ")", "except", ":", "Ne", "=", "omega_level", ".", "shape", "[", "0", "]", "#####################################", "# 1 We ...
Return a simplifying function, its inverse, and simplified frequencies. This implements an index iu that labels energies in a non-degenerate way. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> print(omega_levelu) [0.0, 100.0, 200.0, 300.0] >>> print(Neu) 4 >>> print(xiu) [[[0. 1. 0. 0.] [1. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] <BLANKLINE> [[0. 0. 1. 1.] [0. 0. 0. 0.] [1. 0. 0. 0.] [1. 0. 0. 0.]]]
[ "Return", "a", "simplifying", "function", "its", "inverse", "and", "simplified", "frequencies", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L314-L380
train
50,436
oscarlazoarjona/fast
fast/bloch.py
find_omega_min
def find_omega_min(omega_levelu, Neu, Nl, xiu): r"""Find the smallest transition frequency for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> find_omega_min(omega_levelu, Neu, Nl, xiu) ([100.0, 200.0], [1, 2], [0, 0]) """ omega_min = []; iu0 = []; ju0 = [] for l in range(Nl): omegasl = [] for iu in range(Neu): for ju in range(iu): if xiu[l, iu, ju] == 1: omegasl += [(omega_levelu[iu]-omega_levelu[ju], iu, ju)] omegasl = list(sorted(omegasl)) omega_min += [omegasl[0][0]] iu0 += [omegasl[0][1]] ju0 += [omegasl[0][2]] return omega_min, iu0, ju0
python
def find_omega_min(omega_levelu, Neu, Nl, xiu): r"""Find the smallest transition frequency for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> find_omega_min(omega_levelu, Neu, Nl, xiu) ([100.0, 200.0], [1, 2], [0, 0]) """ omega_min = []; iu0 = []; ju0 = [] for l in range(Nl): omegasl = [] for iu in range(Neu): for ju in range(iu): if xiu[l, iu, ju] == 1: omegasl += [(omega_levelu[iu]-omega_levelu[ju], iu, ju)] omegasl = list(sorted(omegasl)) omega_min += [omegasl[0][0]] iu0 += [omegasl[0][1]] ju0 += [omegasl[0][2]] return omega_min, iu0, ju0
[ "def", "find_omega_min", "(", "omega_levelu", ",", "Neu", ",", "Nl", ",", "xiu", ")", ":", "omega_min", "=", "[", "]", "iu0", "=", "[", "]", "ju0", "=", "[", "]", "for", "l", "in", "range", "(", "Nl", ")", ":", "omegasl", "=", "[", "]", "for", ...
r"""Find the smallest transition frequency for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> find_omega_min(omega_levelu, Neu, Nl, xiu) ([100.0, 200.0], [1, 2], [0, 0])
[ "r", "Find", "the", "smallest", "transition", "frequency", "for", "each", "field", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L383-L414
train
50,437
oscarlazoarjona/fast
fast/bloch.py
detunings_indices
def detunings_indices(Neu, Nl, xiu): r"""Get the indices of the transitions of all fields. They are returned in the form [[(i1, j1), (i2, j2)], ...,[(i1, j1)]]. that is, one list of pairs of indices for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> detunings_indices(Neu, Nl, xiu) [[(1, 0)], [(2, 0), (3, 0)]] """ pairs = [] for l in range(Nl): ind = [] for iu in range(Neu): for ju in range(iu): if xiu[l, iu, ju] == 1: ind += [(iu, ju)] pairs += [ind] return pairs
python
def detunings_indices(Neu, Nl, xiu): r"""Get the indices of the transitions of all fields. They are returned in the form [[(i1, j1), (i2, j2)], ...,[(i1, j1)]]. that is, one list of pairs of indices for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> detunings_indices(Neu, Nl, xiu) [[(1, 0)], [(2, 0), (3, 0)]] """ pairs = [] for l in range(Nl): ind = [] for iu in range(Neu): for ju in range(iu): if xiu[l, iu, ju] == 1: ind += [(iu, ju)] pairs += [ind] return pairs
[ "def", "detunings_indices", "(", "Neu", ",", "Nl", ",", "xiu", ")", ":", "pairs", "=", "[", "]", "for", "l", "in", "range", "(", "Nl", ")", ":", "ind", "=", "[", "]", "for", "iu", "in", "range", "(", "Neu", ")", ":", "for", "ju", "in", "range...
r"""Get the indices of the transitions of all fields. They are returned in the form [[(i1, j1), (i2, j2)], ...,[(i1, j1)]]. that is, one list of pairs of indices for each field. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> detunings_indices(Neu, Nl, xiu) [[(1, 0)], [(2, 0), (3, 0)]]
[ "r", "Get", "the", "indices", "of", "the", "transitions", "of", "all", "fields", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L417-L448
train
50,438
oscarlazoarjona/fast
fast/bloch.py
detunings_code
def detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0): r"""Get the code to calculate the simplified detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> print(detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0)) delta1_2_1 = detuning_knob[0] delta2_3_1 = detuning_knob[1] delta2_4_1 = detuning_knob[1] + (-100.0) <BLANKLINE> """ code_det = "" for l in range(Nl): for pair in pairs[l]: iu, ju = pair code_det += " delta"+str(l+1) code_det += "_"+str(iu+1) code_det += "_"+str(ju+1) code_det += " = detuning_knob["+str(l)+"]" corr = -omega_levelu[iu]+omega_levelu[iu0[l]] corr = -omega_levelu[ju0[l]]+omega_levelu[ju] + corr if corr != 0: code_det += " + ("+str(corr)+")" code_det += "\n" return code_det
python
def detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0): r"""Get the code to calculate the simplified detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> print(detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0)) delta1_2_1 = detuning_knob[0] delta2_3_1 = detuning_knob[1] delta2_4_1 = detuning_knob[1] + (-100.0) <BLANKLINE> """ code_det = "" for l in range(Nl): for pair in pairs[l]: iu, ju = pair code_det += " delta"+str(l+1) code_det += "_"+str(iu+1) code_det += "_"+str(ju+1) code_det += " = detuning_knob["+str(l)+"]" corr = -omega_levelu[iu]+omega_levelu[iu0[l]] corr = -omega_levelu[ju0[l]]+omega_levelu[ju] + corr if corr != 0: code_det += " + ("+str(corr)+")" code_det += "\n" return code_det
[ "def", "detunings_code", "(", "Neu", ",", "Nl", ",", "pairs", ",", "omega_levelu", ",", "iu0", ",", "ju0", ")", ":", "code_det", "=", "\"\"", "for", "l", "in", "range", "(", "Nl", ")", ":", "for", "pair", "in", "pairs", "[", "l", "]", ":", "iu", ...
r"""Get the code to calculate the simplified detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> print(detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0)) delta1_2_1 = detuning_knob[0] delta2_3_1 = detuning_knob[1] delta2_4_1 = detuning_knob[1] + (-100.0) <BLANKLINE>
[ "r", "Get", "the", "code", "to", "calculate", "the", "simplified", "detunings", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L451-L489
train
50,439
oscarlazoarjona/fast
fast/bloch.py
detunings_combinations
def detunings_combinations(pairs): r"""Return all combinations of detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> pairs = detunings_indices(Neu, Nl, xiu) >>> detunings_combinations(pairs) [[(1, 0), (2, 0)], [(1, 0), (3, 0)]] """ def iter(pairs, combs, l): combs_n = [] for i in range(len(combs)): for j in range(len(pairs[l])): combs_n += [combs[i] + [pairs[l][j]]] return combs_n Nl = len(pairs) combs = [[pairs[0][k]] for k in range(len(pairs[0]))] for l in range(1, Nl): combs = iter(pairs, combs, 1) return combs
python
def detunings_combinations(pairs): r"""Return all combinations of detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> pairs = detunings_indices(Neu, Nl, xiu) >>> detunings_combinations(pairs) [[(1, 0), (2, 0)], [(1, 0), (3, 0)]] """ def iter(pairs, combs, l): combs_n = [] for i in range(len(combs)): for j in range(len(pairs[l])): combs_n += [combs[i] + [pairs[l][j]]] return combs_n Nl = len(pairs) combs = [[pairs[0][k]] for k in range(len(pairs[0]))] for l in range(1, Nl): combs = iter(pairs, combs, 1) return combs
[ "def", "detunings_combinations", "(", "pairs", ")", ":", "def", "iter", "(", "pairs", ",", "combs", ",", "l", ")", ":", "combs_n", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "combs", ")", ")", ":", "for", "j", "in", "range", "(", ...
r"""Return all combinations of detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> pairs = detunings_indices(Neu, Nl, xiu) >>> detunings_combinations(pairs) [[(1, 0), (2, 0)], [(1, 0), (3, 0)]]
[ "r", "Return", "all", "combinations", "of", "detunings", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L492-L524
train
50,440
oscarlazoarjona/fast
fast/bloch.py
detunings_rewrite
def detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, omega_levelu, iu0, ju0): r"""Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]' """ Nl = len(omega_laser) Neu = len(symb_omega_levelu) # We find the coefficients a_i of the field frequencies. a = [diff(expr, omega_laser[l]) for l in range(Nl)] # We look for a combination of the detunings obtained with the # function detunings_code. For each combination we sum the # detunings weighed by a_i. 
success = False for comb in combs: expr_try = 0 for l in range(Nl): expr_try += a[l]*(omega_laser[l] - symb_omega_levelu[comb[l][0]] + symb_omega_levelu[comb[l][1]]) if expr-expr_try == 0: success = True break assign = "" if success: for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "delta"+str(l+1) assign += "_"+str(comb[l][0]+1) assign += "_"+str(comb[l][1]+1) else: # We get the code for Hii using detuning knobs. # We find out the remainder terms. _remainder = expr - sum([a[l]*omega_laser[l] for l in range(Nl)]) # We find the coefficients of the remainder. b = [diff(_remainder, symb_omega_levelu[j]) for j in range(Neu)] # We calculate the remainder numerically. remainder = sum([b[j]*omega_levelu[j] for j in range(Neu)]) # We add the contributions from the detuning knobs. remainder += sum([a[l]*(omega_levelu[iu0[l]] - omega_levelu[ju0[l]]) for l in range(Nl)]) assign = str(remainder) # We get the code for Hii using detuning knobs. for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "detuning_knob["+str(l)+"]" return assign
python
def detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, omega_levelu, iu0, ju0): r"""Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]' """ Nl = len(omega_laser) Neu = len(symb_omega_levelu) # We find the coefficients a_i of the field frequencies. a = [diff(expr, omega_laser[l]) for l in range(Nl)] # We look for a combination of the detunings obtained with the # function detunings_code. For each combination we sum the # detunings weighed by a_i. 
success = False for comb in combs: expr_try = 0 for l in range(Nl): expr_try += a[l]*(omega_laser[l] - symb_omega_levelu[comb[l][0]] + symb_omega_levelu[comb[l][1]]) if expr-expr_try == 0: success = True break assign = "" if success: for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "delta"+str(l+1) assign += "_"+str(comb[l][0]+1) assign += "_"+str(comb[l][1]+1) else: # We get the code for Hii using detuning knobs. # We find out the remainder terms. _remainder = expr - sum([a[l]*omega_laser[l] for l in range(Nl)]) # We find the coefficients of the remainder. b = [diff(_remainder, symb_omega_levelu[j]) for j in range(Neu)] # We calculate the remainder numerically. remainder = sum([b[j]*omega_levelu[j] for j in range(Neu)]) # We add the contributions from the detuning knobs. remainder += sum([a[l]*(omega_levelu[iu0[l]] - omega_levelu[ju0[l]]) for l in range(Nl)]) assign = str(remainder) # We get the code for Hii using detuning knobs. for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "detuning_knob["+str(l)+"]" return assign
[ "def", "detunings_rewrite", "(", "expr", ",", "combs", ",", "omega_laser", ",", "symb_omega_levelu", ",", "omega_levelu", ",", "iu0", ",", "ju0", ")", ":", "Nl", "=", "len", "(", "omega_laser", ")", "Neu", "=", "len", "(", "symb_omega_levelu", ")", "# We f...
r"""Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]'
[ "r", "Rewrite", "a", "symbolic", "expression", "in", "terms", "of", "allowed", "transition", "detunings", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L527-L624
train
50,441
oscarlazoarjona/fast
fast/bloch.py
term_code
def term_code(mu, nu, coef, matrix_form, rhouv_isconjugated, linear=True): r"""Get code to calculate a linear term. >>> term_code(1, 0, 33, False, False, True) ' rhs[1] += (33)*rho[0]\n' """ if coef == 0: return "" coef = str(coef) # We change E_{0i} -> E0[i-1] ini = coef.find("E_{0") fin = coef.find("}") if ini != -1: l = int(coef[ini+4: fin]) coef = coef[:ini]+"Ep["+str(l-1)+"]"+coef[fin+1:] # We change r[i, j] -> r[:, i, j] coef = coef.replace("rp[", "rp[:, ") coef = coef.replace("rm[", "rm[:, ") # We change symbolic complex-operations into fast numpy functions. coef = coef.replace("conjugate(", "np.conjugate(") coef = coef.replace("re(", "np.real(") coef = coef.replace("im(", "np.imag(") coef = coef.replace("*I", "j") if not linear: if matrix_form: s = " b["+str(mu)+"] += "+coef+"\n" else: s = " rhs["+str(mu)+"] += "+coef+"\n" return s # We add the syntax to calculate the term and store it in memory. s = " " if matrix_form: s += "A["+str(mu)+", "+str(nu)+"] += "+coef+"\n" else: s += "rhs["+str(mu)+"] += ("+coef+")" if rhouv_isconjugated: s += "*np.conjugate(rho["+str(nu)+'])\n' else: s += "*rho["+str(nu)+']\n' return s
python
def term_code(mu, nu, coef, matrix_form, rhouv_isconjugated, linear=True): r"""Get code to calculate a linear term. >>> term_code(1, 0, 33, False, False, True) ' rhs[1] += (33)*rho[0]\n' """ if coef == 0: return "" coef = str(coef) # We change E_{0i} -> E0[i-1] ini = coef.find("E_{0") fin = coef.find("}") if ini != -1: l = int(coef[ini+4: fin]) coef = coef[:ini]+"Ep["+str(l-1)+"]"+coef[fin+1:] # We change r[i, j] -> r[:, i, j] coef = coef.replace("rp[", "rp[:, ") coef = coef.replace("rm[", "rm[:, ") # We change symbolic complex-operations into fast numpy functions. coef = coef.replace("conjugate(", "np.conjugate(") coef = coef.replace("re(", "np.real(") coef = coef.replace("im(", "np.imag(") coef = coef.replace("*I", "j") if not linear: if matrix_form: s = " b["+str(mu)+"] += "+coef+"\n" else: s = " rhs["+str(mu)+"] += "+coef+"\n" return s # We add the syntax to calculate the term and store it in memory. s = " " if matrix_form: s += "A["+str(mu)+", "+str(nu)+"] += "+coef+"\n" else: s += "rhs["+str(mu)+"] += ("+coef+")" if rhouv_isconjugated: s += "*np.conjugate(rho["+str(nu)+'])\n' else: s += "*rho["+str(nu)+']\n' return s
[ "def", "term_code", "(", "mu", ",", "nu", ",", "coef", ",", "matrix_form", ",", "rhouv_isconjugated", ",", "linear", "=", "True", ")", ":", "if", "coef", "==", "0", ":", "return", "\"\"", "coef", "=", "str", "(", "coef", ")", "# We change E_{0i} -> E0[i-...
r"""Get code to calculate a linear term. >>> term_code(1, 0, 33, False, False, True) ' rhs[1] += (33)*rho[0]\n'
[ "r", "Get", "code", "to", "calculate", "a", "linear", "term", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L1481-L1528
train
50,442
oscarlazoarjona/fast
fast/bloch.py
observable
def observable(operator, rho, unfolding, complex=False): r"""Return an observable ammount. INPUT: - ``operator`` - An square matrix representing a hermitian operator \ in thesame basis as the density matrix. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> Ne = 2 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> rho = unfolding(rho) >>> sx = np.array([[0, 1], [1, 0]]) >>> print(observable(sx, rho, unfolding)) 2.0 """ if len(rho.shape) == 2: return np.array([observable(operator, i, unfolding) for i in rho]) Ne = unfolding.Ne Mu = unfolding.Mu obs = 0 if unfolding.normalized: rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)]) for i in range(Ne): for k in range(Ne): if unfolding.real: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: if k < i: u, v = (i, k) else: u, v = (k, i) obs += operator[i, k]*rho[Mu(1, u, v)] if k != i: if k < i: obs += 1j*operator[i, k]*rho[Mu(-1, u, v)] else: obs += -1j*operator[i, k]*rho[Mu(-1, u, v)] else: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: obs += operator[i, k]*rho[Mu(0, k, i)] if not complex: obs = np.real(obs) return obs
python
def observable(operator, rho, unfolding, complex=False): r"""Return an observable ammount. INPUT: - ``operator`` - An square matrix representing a hermitian operator \ in thesame basis as the density matrix. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> Ne = 2 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> rho = unfolding(rho) >>> sx = np.array([[0, 1], [1, 0]]) >>> print(observable(sx, rho, unfolding)) 2.0 """ if len(rho.shape) == 2: return np.array([observable(operator, i, unfolding) for i in rho]) Ne = unfolding.Ne Mu = unfolding.Mu obs = 0 if unfolding.normalized: rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)]) for i in range(Ne): for k in range(Ne): if unfolding.real: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: if k < i: u, v = (i, k) else: u, v = (k, i) obs += operator[i, k]*rho[Mu(1, u, v)] if k != i: if k < i: obs += 1j*operator[i, k]*rho[Mu(-1, u, v)] else: obs += -1j*operator[i, k]*rho[Mu(-1, u, v)] else: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: obs += operator[i, k]*rho[Mu(0, k, i)] if not complex: obs = np.real(obs) return obs
[ "def", "observable", "(", "operator", ",", "rho", ",", "unfolding", ",", "complex", "=", "False", ")", ":", "if", "len", "(", "rho", ".", "shape", ")", "==", "2", ":", "return", "np", ".", "array", "(", "[", "observable", "(", "operator", ",", "i",...
r"""Return an observable ammount. INPUT: - ``operator`` - An square matrix representing a hermitian operator \ in thesame basis as the density matrix. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> Ne = 2 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> rho = unfolding(rho) >>> sx = np.array([[0, 1], [1, 0]]) >>> print(observable(sx, rho, unfolding)) 2.0
[ "r", "Return", "an", "observable", "ammount", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L3161-L3217
train
50,443
oscarlazoarjona/fast
fast/bloch.py
electric_succeptibility
def electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, part=0): r"""Return the electric succeptibility for a given field. INPUT: - ``l`` - The index labeling the probe field. - ``Ep`` - A list of the amplitudes of all pump fields. - ``epsilonp`` - The polarization vector of the probe field. - ``rm`` - The below-diagonal components of the position operator \ in the cartesian basis: - ``n`` - The number density of atoms. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> import numpy as np >>> from sympy import symbols >>> from scipy.constants import physical_constants >>> from fast import vapour_number_density >>> e_num = physical_constants["elementary charge"][0] >>> hbar_num = physical_constants["Planck constant over 2 pi"][0] >>> Ne = 2 >>> Nl = 1 >>> Ep = [-1.0] >>> epsilonp = np.array([[0, 0, 1.0]]) >>> delta = symbols("delta") >>> detuning_knob = [delta] >>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]]) >>> omega_level = np.array([0.0, 100.0]) >>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num ... for p in range(3)] >>> xi = np.array([[[0, 1], [1, 0]]]) >>> theta = phase_transformation(Ne, Nl, rm, xi) >>> sweep_steady_state = fast_sweep_steady_state(Ep, epsilonp, gamma, ... omega_level, rm, xi, ... theta) >>> deltas, rho = sweep_steady_state([[-20, 20, 11]]) >>> n = vapour_number_density(273.15+20, "Rb") >>> unfolding = Unfolding(Ne, True, True, True) >>> chire = electric_succeptibility(0, Ep, epsilonp, rm, n, ... 
rho, unfolding) >>> print(chire) [ 4.4824e-09-1.1206e-10j 5.5971e-09-1.7491e-10j 7.4459e-09-3.1024e-10j 1.1097e-08-6.9356e-10j 2.1449e-08-2.6811e-09j 0.0000e+00-5.9877e-08j -2.1449e-08-2.6811e-09j -1.1097e-08-6.9356e-10j -7.4459e-09-3.1024e-10j -5.5971e-09-1.7491e-10j -4.4824e-09-1.1206e-10j] """ epsilonm = epsilonp.conjugate() rp = np.array([rm[i].transpose().conjugate() for i in range(3)]) if part == 1: op = cartesian_dot_product(rp, epsilonm[0]) op += cartesian_dot_product(rm, epsilonp[0]) op = -e_num*n/epsilon_0_num/np.abs(Ep[0])*op elif part == -1: op = cartesian_dot_product(rm, epsilonp[0]) op += - cartesian_dot_product(rp, epsilonm[0]) op = -1j*e_num*n/epsilon_0_num/np.abs(Ep[0])*op elif part == 0: chire = electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, +1) chiim = electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, -1) return chire + 1j*chiim return np.real(observable(op, rho, unfolding))
python
def electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, part=0): r"""Return the electric succeptibility for a given field. INPUT: - ``l`` - The index labeling the probe field. - ``Ep`` - A list of the amplitudes of all pump fields. - ``epsilonp`` - The polarization vector of the probe field. - ``rm`` - The below-diagonal components of the position operator \ in the cartesian basis: - ``n`` - The number density of atoms. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> import numpy as np >>> from sympy import symbols >>> from scipy.constants import physical_constants >>> from fast import vapour_number_density >>> e_num = physical_constants["elementary charge"][0] >>> hbar_num = physical_constants["Planck constant over 2 pi"][0] >>> Ne = 2 >>> Nl = 1 >>> Ep = [-1.0] >>> epsilonp = np.array([[0, 0, 1.0]]) >>> delta = symbols("delta") >>> detuning_knob = [delta] >>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]]) >>> omega_level = np.array([0.0, 100.0]) >>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num ... for p in range(3)] >>> xi = np.array([[[0, 1], [1, 0]]]) >>> theta = phase_transformation(Ne, Nl, rm, xi) >>> sweep_steady_state = fast_sweep_steady_state(Ep, epsilonp, gamma, ... omega_level, rm, xi, ... theta) >>> deltas, rho = sweep_steady_state([[-20, 20, 11]]) >>> n = vapour_number_density(273.15+20, "Rb") >>> unfolding = Unfolding(Ne, True, True, True) >>> chire = electric_succeptibility(0, Ep, epsilonp, rm, n, ... 
rho, unfolding) >>> print(chire) [ 4.4824e-09-1.1206e-10j 5.5971e-09-1.7491e-10j 7.4459e-09-3.1024e-10j 1.1097e-08-6.9356e-10j 2.1449e-08-2.6811e-09j 0.0000e+00-5.9877e-08j -2.1449e-08-2.6811e-09j -1.1097e-08-6.9356e-10j -7.4459e-09-3.1024e-10j -5.5971e-09-1.7491e-10j -4.4824e-09-1.1206e-10j] """ epsilonm = epsilonp.conjugate() rp = np.array([rm[i].transpose().conjugate() for i in range(3)]) if part == 1: op = cartesian_dot_product(rp, epsilonm[0]) op += cartesian_dot_product(rm, epsilonp[0]) op = -e_num*n/epsilon_0_num/np.abs(Ep[0])*op elif part == -1: op = cartesian_dot_product(rm, epsilonp[0]) op += - cartesian_dot_product(rp, epsilonm[0]) op = -1j*e_num*n/epsilon_0_num/np.abs(Ep[0])*op elif part == 0: chire = electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, +1) chiim = electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, -1) return chire + 1j*chiim return np.real(observable(op, rho, unfolding))
[ "def", "electric_succeptibility", "(", "l", ",", "Ep", ",", "epsilonp", ",", "rm", ",", "n", ",", "rho", ",", "unfolding", ",", "part", "=", "0", ")", ":", "epsilonm", "=", "epsilonp", ".", "conjugate", "(", ")", "rp", "=", "np", ".", "array", "(",...
r"""Return the electric succeptibility for a given field. INPUT: - ``l`` - The index labeling the probe field. - ``Ep`` - A list of the amplitudes of all pump fields. - ``epsilonp`` - The polarization vector of the probe field. - ``rm`` - The below-diagonal components of the position operator \ in the cartesian basis: - ``n`` - The number density of atoms. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> import numpy as np >>> from sympy import symbols >>> from scipy.constants import physical_constants >>> from fast import vapour_number_density >>> e_num = physical_constants["elementary charge"][0] >>> hbar_num = physical_constants["Planck constant over 2 pi"][0] >>> Ne = 2 >>> Nl = 1 >>> Ep = [-1.0] >>> epsilonp = np.array([[0, 0, 1.0]]) >>> delta = symbols("delta") >>> detuning_knob = [delta] >>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]]) >>> omega_level = np.array([0.0, 100.0]) >>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num ... for p in range(3)] >>> xi = np.array([[[0, 1], [1, 0]]]) >>> theta = phase_transformation(Ne, Nl, rm, xi) >>> sweep_steady_state = fast_sweep_steady_state(Ep, epsilonp, gamma, ... omega_level, rm, xi, ... theta) >>> deltas, rho = sweep_steady_state([[-20, 20, 11]]) >>> n = vapour_number_density(273.15+20, "Rb") >>> unfolding = Unfolding(Ne, True, True, True) >>> chire = electric_succeptibility(0, Ep, epsilonp, rm, n, ... rho, unfolding) >>> print(chire) [ 4.4824e-09-1.1206e-10j 5.5971e-09-1.7491e-10j 7.4459e-09-3.1024e-10j 1.1097e-08-6.9356e-10j 2.1449e-08-2.6811e-09j 0.0000e+00-5.9877e-08j -2.1449e-08-2.6811e-09j -1.1097e-08-6.9356e-10j -7.4459e-09-3.1024e-10j -5.5971e-09-1.7491e-10j -4.4824e-09-1.1206e-10j]
[ "r", "Return", "the", "electric", "succeptibility", "for", "a", "given", "field", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L3220-L3290
train
50,444
oscarlazoarjona/fast
fast/bloch.py
radiated_intensity
def radiated_intensity(rho, i, j, epsilonp, rm, omega_level, xi, N, D, unfolding): r"""Return the radiated intensity in a given direction. >>> from fast import State, Integer, split_hyperfine_to_magnetic >>> g = State("Rb", 87, 5, 1, 3/Integer(2), 0) >>> e = State("Rb", 87, 4, 2, 5/Integer(2), 1) >>> magnetic_states = split_hyperfine_to_magnetic([g, e]) >>> omega0 = magnetic_states[0].omega >>> omega_level = [ei.omega - omega0 for ei in magnetic_states] >>> Ne = len(magnetic_states) >>> N = 4e6 >>> D = 0.1 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.zeros((Ne, Ne)) >>> rho[0, 0] = 0.8 >>> rho[3, 3] = 0.2 >>> rho[3, 0] = 0.3 >>> rho[0, 3] = 0.3 >>> rho = unfolding(rho) >>> ep = np.array([1, 1j, 0])/np.sqrt(2.0) >>> ex = np.array([1, 0, 0]) >>> r0 = 4.75278521538619e-11 >>> rm = np.zeros((3, Ne, Ne), complex) >>> rm[0, 1, 0] = -r0 >>> rm[0, 3, 0] = r0 >>> rm[1, 1, 0] = -1j*r0 >>> rm[1, 3, 0] = -1j*r0 >>> rm[1, 2, 0] = -np.sqrt(2)*r0 >>> xi = np.zeros((1, Ne, Ne)) >>> xi[0, 1, 0] = 1 >>> xi[0, 2, 0] = 1 >>> xi[0, 3, 0] = 1 >>> xi[0, :, :] += xi[0, :, :].transpose() >>> print(radiated_intensity(rho, 1, 0, ex, rm, ... 
omega_level, xi, N, D, unfolding)) 4.60125990174e-22 """ def inij(i, j, ilist, jlist): if (i in ilist) and (j in jlist): return 1 else: return 0 rm = np.array(rm) Nl = xi.shape[0] Ne = xi.shape[1] aux = define_simplification(omega_level, xi, Nl) u = aux[0] omega_levelu = aux[2] ui = u(i) uj = u(j) omegaij = omega_levelu[ui] - omega_levelu[uj] ilist = [ii for ii in range(Ne) if u(ii) == ui] jlist = [jj for jj in range(Ne) if u(jj) == uj] rp = np.array([rm[ii].conjugate().transpose() for ii in range(3)]) rm = np.array([[[rm[p, ii, jj]*inij(ii, jj, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) rp = np.array([[[rp[p, ii, jj]*inij(jj, ii, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) epsilonm = epsilonp.conjugate() Adag = cartesian_dot_product(rm, epsilonp) A = cartesian_dot_product(rp, epsilonm) fact = alpha_num*N*hbar_num*omegaij**3/2/np.pi/c_num**2/D**2 Iop = fact * np.dot(Adag, A) intensity = observable(Iop, rho, unfolding) intensity = float(np.real(intensity)) return intensity
python
def radiated_intensity(rho, i, j, epsilonp, rm, omega_level, xi, N, D, unfolding): r"""Return the radiated intensity in a given direction. >>> from fast import State, Integer, split_hyperfine_to_magnetic >>> g = State("Rb", 87, 5, 1, 3/Integer(2), 0) >>> e = State("Rb", 87, 4, 2, 5/Integer(2), 1) >>> magnetic_states = split_hyperfine_to_magnetic([g, e]) >>> omega0 = magnetic_states[0].omega >>> omega_level = [ei.omega - omega0 for ei in magnetic_states] >>> Ne = len(magnetic_states) >>> N = 4e6 >>> D = 0.1 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.zeros((Ne, Ne)) >>> rho[0, 0] = 0.8 >>> rho[3, 3] = 0.2 >>> rho[3, 0] = 0.3 >>> rho[0, 3] = 0.3 >>> rho = unfolding(rho) >>> ep = np.array([1, 1j, 0])/np.sqrt(2.0) >>> ex = np.array([1, 0, 0]) >>> r0 = 4.75278521538619e-11 >>> rm = np.zeros((3, Ne, Ne), complex) >>> rm[0, 1, 0] = -r0 >>> rm[0, 3, 0] = r0 >>> rm[1, 1, 0] = -1j*r0 >>> rm[1, 3, 0] = -1j*r0 >>> rm[1, 2, 0] = -np.sqrt(2)*r0 >>> xi = np.zeros((1, Ne, Ne)) >>> xi[0, 1, 0] = 1 >>> xi[0, 2, 0] = 1 >>> xi[0, 3, 0] = 1 >>> xi[0, :, :] += xi[0, :, :].transpose() >>> print(radiated_intensity(rho, 1, 0, ex, rm, ... 
omega_level, xi, N, D, unfolding)) 4.60125990174e-22 """ def inij(i, j, ilist, jlist): if (i in ilist) and (j in jlist): return 1 else: return 0 rm = np.array(rm) Nl = xi.shape[0] Ne = xi.shape[1] aux = define_simplification(omega_level, xi, Nl) u = aux[0] omega_levelu = aux[2] ui = u(i) uj = u(j) omegaij = omega_levelu[ui] - omega_levelu[uj] ilist = [ii for ii in range(Ne) if u(ii) == ui] jlist = [jj for jj in range(Ne) if u(jj) == uj] rp = np.array([rm[ii].conjugate().transpose() for ii in range(3)]) rm = np.array([[[rm[p, ii, jj]*inij(ii, jj, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) rp = np.array([[[rp[p, ii, jj]*inij(jj, ii, ilist, jlist) for jj in range(Ne)] for ii in range(Ne)] for p in range(3)]) epsilonm = epsilonp.conjugate() Adag = cartesian_dot_product(rm, epsilonp) A = cartesian_dot_product(rp, epsilonm) fact = alpha_num*N*hbar_num*omegaij**3/2/np.pi/c_num**2/D**2 Iop = fact * np.dot(Adag, A) intensity = observable(Iop, rho, unfolding) intensity = float(np.real(intensity)) return intensity
[ "def", "radiated_intensity", "(", "rho", ",", "i", ",", "j", ",", "epsilonp", ",", "rm", ",", "omega_level", ",", "xi", ",", "N", ",", "D", ",", "unfolding", ")", ":", "def", "inij", "(", "i", ",", "j", ",", "ilist", ",", "jlist", ")", ":", "if...
r"""Return the radiated intensity in a given direction. >>> from fast import State, Integer, split_hyperfine_to_magnetic >>> g = State("Rb", 87, 5, 1, 3/Integer(2), 0) >>> e = State("Rb", 87, 4, 2, 5/Integer(2), 1) >>> magnetic_states = split_hyperfine_to_magnetic([g, e]) >>> omega0 = magnetic_states[0].omega >>> omega_level = [ei.omega - omega0 for ei in magnetic_states] >>> Ne = len(magnetic_states) >>> N = 4e6 >>> D = 0.1 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.zeros((Ne, Ne)) >>> rho[0, 0] = 0.8 >>> rho[3, 3] = 0.2 >>> rho[3, 0] = 0.3 >>> rho[0, 3] = 0.3 >>> rho = unfolding(rho) >>> ep = np.array([1, 1j, 0])/np.sqrt(2.0) >>> ex = np.array([1, 0, 0]) >>> r0 = 4.75278521538619e-11 >>> rm = np.zeros((3, Ne, Ne), complex) >>> rm[0, 1, 0] = -r0 >>> rm[0, 3, 0] = r0 >>> rm[1, 1, 0] = -1j*r0 >>> rm[1, 3, 0] = -1j*r0 >>> rm[1, 2, 0] = -np.sqrt(2)*r0 >>> xi = np.zeros((1, Ne, Ne)) >>> xi[0, 1, 0] = 1 >>> xi[0, 2, 0] = 1 >>> xi[0, 3, 0] = 1 >>> xi[0, :, :] += xi[0, :, :].transpose() >>> print(radiated_intensity(rho, 1, 0, ex, rm, ... omega_level, xi, N, D, unfolding)) 4.60125990174e-22
[ "r", "Return", "the", "radiated", "intensity", "in", "a", "given", "direction", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L3293-L3378
train
50,445
oscarlazoarjona/fast
fast/bloch.py
Unfolding.inverse
def inverse(self, rhov, time_derivative=False): r"""Fold a vector into a matrix. The input of this function can be a numpy array or a sympy Matrix. If the input is understood to represent the time derivative of a density matrix, then the flag time_derivative must be set to True. >>> unfolding = Unfolding(2, real=True, lower_triangular=True, ... normalized=True) >>> rhos = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> print(rhos == unfolding.inverse(unfolding(rhos))) [[ True True] [ True True]] >>> from fast import define_density_matrix >>> from sympy import pprint >>> rho = define_density_matrix(2) >>> pprint(unfolding.inverse(unfolding(rho)), use_unicode=False) [ -rho22 + 1 re(rho21) - I*im(rho21)] [ ] [re(rho21) + I*im(rho21) rho22 ] >>> rhops = np.array([[0.0, 0.0], ... [0.0, 0.0]]) >>> print(unfolding.inverse(unfolding(rhops), True)) [[-0.-0.j 0.-0.j] [ 0.+0.j 0.+0.j]] """ Ne = self.Ne Nrho = self.Nrho IJ = self.IJ if isinstance(rhov, np.ndarray): rho = np.zeros((Ne, Ne), complex) numeric = True elif isinstance(rhov, sympy.Matrix): rho = sympy.zeros(Ne, Ne) numeric = False for mu in range(Nrho): s, i, j = IJ(mu) if numeric: if s == 1: rho[i, j] += rhov[mu] elif s == -1: rho[i, j] += 1j*rhov[mu] elif s == 0: rho[i, j] += rhov[mu] else: if s == 1: rho[i, j] += rhov[mu] elif s == -1: rho[i, j] += sympy.I*rhov[mu] elif s == 0: rho[i, j] += rhov[mu] if self.lower_triangular: for i in range(Ne): for j in range(i): rho[j, i] = rho[i, j].conjugate() if self.normalized: if time_derivative: rho[0, 0] = -sum([rho[i, i] for i in range(1, Ne)]) else: rho[0, 0] = 1-sum([rho[i, i] for i in range(1, Ne)]) return rho
python
def inverse(self, rhov, time_derivative=False): r"""Fold a vector into a matrix. The input of this function can be a numpy array or a sympy Matrix. If the input is understood to represent the time derivative of a density matrix, then the flag time_derivative must be set to True. >>> unfolding = Unfolding(2, real=True, lower_triangular=True, ... normalized=True) >>> rhos = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> print(rhos == unfolding.inverse(unfolding(rhos))) [[ True True] [ True True]] >>> from fast import define_density_matrix >>> from sympy import pprint >>> rho = define_density_matrix(2) >>> pprint(unfolding.inverse(unfolding(rho)), use_unicode=False) [ -rho22 + 1 re(rho21) - I*im(rho21)] [ ] [re(rho21) + I*im(rho21) rho22 ] >>> rhops = np.array([[0.0, 0.0], ... [0.0, 0.0]]) >>> print(unfolding.inverse(unfolding(rhops), True)) [[-0.-0.j 0.-0.j] [ 0.+0.j 0.+0.j]] """ Ne = self.Ne Nrho = self.Nrho IJ = self.IJ if isinstance(rhov, np.ndarray): rho = np.zeros((Ne, Ne), complex) numeric = True elif isinstance(rhov, sympy.Matrix): rho = sympy.zeros(Ne, Ne) numeric = False for mu in range(Nrho): s, i, j = IJ(mu) if numeric: if s == 1: rho[i, j] += rhov[mu] elif s == -1: rho[i, j] += 1j*rhov[mu] elif s == 0: rho[i, j] += rhov[mu] else: if s == 1: rho[i, j] += rhov[mu] elif s == -1: rho[i, j] += sympy.I*rhov[mu] elif s == 0: rho[i, j] += rhov[mu] if self.lower_triangular: for i in range(Ne): for j in range(i): rho[j, i] = rho[i, j].conjugate() if self.normalized: if time_derivative: rho[0, 0] = -sum([rho[i, i] for i in range(1, Ne)]) else: rho[0, 0] = 1-sum([rho[i, i] for i in range(1, Ne)]) return rho
[ "def", "inverse", "(", "self", ",", "rhov", ",", "time_derivative", "=", "False", ")", ":", "Ne", "=", "self", ".", "Ne", "Nrho", "=", "self", ".", "Nrho", "IJ", "=", "self", ".", "IJ", "if", "isinstance", "(", "rhov", ",", "np", ".", "ndarray", ...
r"""Fold a vector into a matrix. The input of this function can be a numpy array or a sympy Matrix. If the input is understood to represent the time derivative of a density matrix, then the flag time_derivative must be set to True. >>> unfolding = Unfolding(2, real=True, lower_triangular=True, ... normalized=True) >>> rhos = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> print(rhos == unfolding.inverse(unfolding(rhos))) [[ True True] [ True True]] >>> from fast import define_density_matrix >>> from sympy import pprint >>> rho = define_density_matrix(2) >>> pprint(unfolding.inverse(unfolding(rho)), use_unicode=False) [ -rho22 + 1 re(rho21) - I*im(rho21)] [ ] [re(rho21) + I*im(rho21) rho22 ] >>> rhops = np.array([[0.0, 0.0], ... [0.0, 0.0]]) >>> print(unfolding.inverse(unfolding(rhops), True)) [[-0.-0.j 0.-0.j] [ 0.+0.j 0.+0.j]]
[ "r", "Fold", "a", "vector", "into", "a", "matrix", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L1280-L1351
train
50,446
tilde-lab/tilde
tilde/berlinium/plotter.py
eplotter
def eplotter(task, data): # CRYSTAL, VASP, EXCITING ''' eplotter is like bdplotter but less complicated ''' results, color, fdata = [], None, [] if task == 'optstory': color = '#CC0000' clickable = True for n, i in enumerate(data): fdata.append([n, i[4]]) fdata = array(fdata) fdata[:,1] -= min(fdata[:,1]) # this normalizes values to minimum (by 2nd col) fdata = fdata.tolist() elif task == 'convergence': color = '#0066CC' clickable = False for n, i in enumerate(data): fdata.append([n, i]) for n in range(len(fdata)): #fdata[n][1] = "%10.5f" % fdata[n][1] fdata[n][1] = round(fdata[n][1], 5) results.append({'color': color, 'clickable:': clickable, 'data': fdata}) return results
python
def eplotter(task, data): # CRYSTAL, VASP, EXCITING ''' eplotter is like bdplotter but less complicated ''' results, color, fdata = [], None, [] if task == 'optstory': color = '#CC0000' clickable = True for n, i in enumerate(data): fdata.append([n, i[4]]) fdata = array(fdata) fdata[:,1] -= min(fdata[:,1]) # this normalizes values to minimum (by 2nd col) fdata = fdata.tolist() elif task == 'convergence': color = '#0066CC' clickable = False for n, i in enumerate(data): fdata.append([n, i]) for n in range(len(fdata)): #fdata[n][1] = "%10.5f" % fdata[n][1] fdata[n][1] = round(fdata[n][1], 5) results.append({'color': color, 'clickable:': clickable, 'data': fdata}) return results
[ "def", "eplotter", "(", "task", ",", "data", ")", ":", "# CRYSTAL, VASP, EXCITING", "results", ",", "color", ",", "fdata", "=", "[", "]", ",", "None", ",", "[", "]", "if", "task", "==", "'optstory'", ":", "color", "=", "'#CC0000'", "clickable", "=", "T...
eplotter is like bdplotter but less complicated
[ "eplotter", "is", "like", "bdplotter", "but", "less", "complicated" ]
59841578b3503075aa85c76f9ae647b3ff92b0a3
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/berlinium/plotter.py#L151-L177
train
50,447
commontk/ctk-cli
ctk_cli/execution.py
popenCLIExecutable
def popenCLIExecutable(command, **kwargs): """Wrapper around subprocess.Popen constructor that tries to detect Slicer CLI modules and launches them through the Slicer launcher in order to prevent potential DLL dependency issues. Any kwargs are passed on to subprocess.Popen(). If you ever try to use this function to run a CLI, you might want to take a look at https://github.com/hmeine/MeVisLab-CLI/blob/master/Modules/Macros/CTK_CLI/CLIModuleBackend.py (in particular, the CLIExecution class.) Ideally, more of that code would be extracted and moved here, but I have not gotten around to doing that yet. """ cliExecutable = command[0] # hack (at least, this does not scale to other module sources): # detect Slicer modules and run through wrapper script setting up # appropriate runtime environment ma = re_slicerSubPath.search(cliExecutable) if ma: wrapper = os.path.join(cliExecutable[:ma.start()], 'Slicer') if sys.platform.startswith('win'): wrapper += '.exe' if os.path.exists(wrapper): command = [wrapper, '--launcher-no-splash', '--launch'] + command return subprocess.Popen(command, **kwargs)
python
def popenCLIExecutable(command, **kwargs): """Wrapper around subprocess.Popen constructor that tries to detect Slicer CLI modules and launches them through the Slicer launcher in order to prevent potential DLL dependency issues. Any kwargs are passed on to subprocess.Popen(). If you ever try to use this function to run a CLI, you might want to take a look at https://github.com/hmeine/MeVisLab-CLI/blob/master/Modules/Macros/CTK_CLI/CLIModuleBackend.py (in particular, the CLIExecution class.) Ideally, more of that code would be extracted and moved here, but I have not gotten around to doing that yet. """ cliExecutable = command[0] # hack (at least, this does not scale to other module sources): # detect Slicer modules and run through wrapper script setting up # appropriate runtime environment ma = re_slicerSubPath.search(cliExecutable) if ma: wrapper = os.path.join(cliExecutable[:ma.start()], 'Slicer') if sys.platform.startswith('win'): wrapper += '.exe' if os.path.exists(wrapper): command = [wrapper, '--launcher-no-splash', '--launch'] + command return subprocess.Popen(command, **kwargs)
[ "def", "popenCLIExecutable", "(", "command", ",", "*", "*", "kwargs", ")", ":", "cliExecutable", "=", "command", "[", "0", "]", "# hack (at least, this does not scale to other module sources):", "# detect Slicer modules and run through wrapper script setting up", "# appropriate r...
Wrapper around subprocess.Popen constructor that tries to detect Slicer CLI modules and launches them through the Slicer launcher in order to prevent potential DLL dependency issues. Any kwargs are passed on to subprocess.Popen(). If you ever try to use this function to run a CLI, you might want to take a look at https://github.com/hmeine/MeVisLab-CLI/blob/master/Modules/Macros/CTK_CLI/CLIModuleBackend.py (in particular, the CLIExecution class.) Ideally, more of that code would be extracted and moved here, but I have not gotten around to doing that yet.
[ "Wrapper", "around", "subprocess", ".", "Popen", "constructor", "that", "tries", "to", "detect", "Slicer", "CLI", "modules", "and", "launches", "them", "through", "the", "Slicer", "launcher", "in", "order", "to", "prevent", "potential", "DLL", "dependency", "iss...
ddd8de62b586491ad6e6750133cc1f0e11f37b11
https://github.com/commontk/ctk-cli/blob/ddd8de62b586491ad6e6750133cc1f0e11f37b11/ctk_cli/execution.py#L41-L69
train
50,448
alexwlchan/specktre
src/specktre/utils.py
_candidate_filenames
def _candidate_filenames(): """Generates filenames of the form 'specktre_123AB.png'. The random noise is five characters long, which allows for 62^5 = 916 million possible filenames. """ while True: random_stub = ''.join([ random.choice(string.ascii_letters + string.digits) for _ in range(5) ]) yield 'specktre_%s.png' % random_stub
python
def _candidate_filenames(): """Generates filenames of the form 'specktre_123AB.png'. The random noise is five characters long, which allows for 62^5 = 916 million possible filenames. """ while True: random_stub = ''.join([ random.choice(string.ascii_letters + string.digits) for _ in range(5) ]) yield 'specktre_%s.png' % random_stub
[ "def", "_candidate_filenames", "(", ")", ":", "while", "True", ":", "random_stub", "=", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "5", ...
Generates filenames of the form 'specktre_123AB.png'. The random noise is five characters long, which allows for 62^5 = 916 million possible filenames.
[ "Generates", "filenames", "of", "the", "form", "specktre_123AB", ".", "png", "." ]
dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc
https://github.com/alexwlchan/specktre/blob/dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc/src/specktre/utils.py#L9-L21
train
50,449
alexwlchan/specktre
examples/draw_tilings.py
draw_tiling
def draw_tiling(coord_generator, filename): """Given a coordinate generator and a filename, render those coordinates in a new image and save them to the file.""" im = Image.new('L', size=(CANVAS_WIDTH, CANVAS_HEIGHT)) for shape in coord_generator(CANVAS_WIDTH, CANVAS_HEIGHT): ImageDraw.Draw(im).polygon(shape, outline='white') im.save(filename)
python
def draw_tiling(coord_generator, filename): """Given a coordinate generator and a filename, render those coordinates in a new image and save them to the file.""" im = Image.new('L', size=(CANVAS_WIDTH, CANVAS_HEIGHT)) for shape in coord_generator(CANVAS_WIDTH, CANVAS_HEIGHT): ImageDraw.Draw(im).polygon(shape, outline='white') im.save(filename)
[ "def", "draw_tiling", "(", "coord_generator", ",", "filename", ")", ":", "im", "=", "Image", ".", "new", "(", "'L'", ",", "size", "=", "(", "CANVAS_WIDTH", ",", "CANVAS_HEIGHT", ")", ")", "for", "shape", "in", "coord_generator", "(", "CANVAS_WIDTH", ",", ...
Given a coordinate generator and a filename, render those coordinates in a new image and save them to the file.
[ "Given", "a", "coordinate", "generator", "and", "a", "filename", "render", "those", "coordinates", "in", "a", "new", "image", "and", "save", "them", "to", "the", "file", "." ]
dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc
https://github.com/alexwlchan/specktre/blob/dcdd0d5486e5c3f612f64221b2e0dbc6fb7adafc/examples/draw_tilings.py#L20-L26
train
50,450
abn/cafeteria
cafeteria/logging/trace.py
trace
def trace(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'TRACE'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.trace("Houston, we have a %s", "thorny problem", exc_info=1) """ if self.isEnabledFor(TRACE): self._log(TRACE, msg, args, **kwargs)
python
def trace(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'TRACE'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.trace("Houston, we have a %s", "thorny problem", exc_info=1) """ if self.isEnabledFor(TRACE): self._log(TRACE, msg, args, **kwargs)
[ "def", "trace", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "isEnabledFor", "(", "TRACE", ")", ":", "self", ".", "_log", "(", "TRACE", ",", "msg", ",", "args", ",", "*", "*", "kwargs", ")" ]
Log 'msg % args' with severity 'TRACE'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.trace("Houston, we have a %s", "thorny problem", exc_info=1)
[ "Log", "msg", "%", "args", "with", "severity", "TRACE", "." ]
0a2efb0529484d6da08568f4364daff77f734dfd
https://github.com/abn/cafeteria/blob/0a2efb0529484d6da08568f4364daff77f734dfd/cafeteria/logging/trace.py#L8-L18
train
50,451
abn/cafeteria
cafeteria/patterns/borg.py
BorgStateManager.get_state
def get_state(cls, clz): """ Retrieve the state of a given Class. :param clz: types.ClassType :return: Class state. :rtype: dict """ if clz not in cls.__shared_state: cls.__shared_state[clz] = ( clz.init_state() if hasattr(clz, "init_state") else {} ) return cls.__shared_state[clz]
python
def get_state(cls, clz): """ Retrieve the state of a given Class. :param clz: types.ClassType :return: Class state. :rtype: dict """ if clz not in cls.__shared_state: cls.__shared_state[clz] = ( clz.init_state() if hasattr(clz, "init_state") else {} ) return cls.__shared_state[clz]
[ "def", "get_state", "(", "cls", ",", "clz", ")", ":", "if", "clz", "not", "in", "cls", ".", "__shared_state", ":", "cls", ".", "__shared_state", "[", "clz", "]", "=", "(", "clz", ".", "init_state", "(", ")", "if", "hasattr", "(", "clz", ",", "\"ini...
Retrieve the state of a given Class. :param clz: types.ClassType :return: Class state. :rtype: dict
[ "Retrieve", "the", "state", "of", "a", "given", "Class", "." ]
0a2efb0529484d6da08568f4364daff77f734dfd
https://github.com/abn/cafeteria/blob/0a2efb0529484d6da08568f4364daff77f734dfd/cafeteria/patterns/borg.py#L19-L31
train
50,452
oscarlazoarjona/fast
build/lib/fast/symbolic.py
define_density_matrix
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False, variables=None): r"""Return a symbolic density matrix. The arguments are Ne (integer): The number of atomic states. explicitly_hermitian (boolean): Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$ normalized (boolean): Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$ A very simple example: >>> define_density_matrix(2) Matrix([ [rho11, rho12], [rho21, rho22]]) The density matrix can be made explicitly hermitian >>> define_density_matrix(2, explicitly_hermitian=True) Matrix([ [rho11, conjugate(rho21)], [rho21, rho22]]) or normalized >>> define_density_matrix(2, normalized=True) Matrix([ [-rho22 + 1, rho12], [ rho21, rho22]]) or it can be made an explicit function of given variables >>> from sympy import symbols >>> t, z = symbols("t, z", positive=True) >>> define_density_matrix(2, variables=[t, z]) Matrix([ [rho11(t, z), rho12(t, z)], [rho21(t, z), rho22(t, z)]]) """ if Ne > 9: comma = "," name = r"\rho" open_brace = "_{" close_brace = "}" else: comma = "" name = "rho" open_brace = "" close_brace = "" rho = [] for i in range(Ne): row_rho = [] for j in range(Ne): if i == j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables, positive=True)] elif i > j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] else: if explicitly_hermitian: row_rho += [conjugate(define_symbol(name, open_brace, comma, j, i, close_brace, variables))] else: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] rho += [row_rho] if normalized: rho11 = 1-sum([rho[i][i] for i in range(1, Ne)]) rho[0][0] = rho11 rho = Matrix(rho) return rho
python
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False, variables=None): r"""Return a symbolic density matrix. The arguments are Ne (integer): The number of atomic states. explicitly_hermitian (boolean): Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$ normalized (boolean): Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$ A very simple example: >>> define_density_matrix(2) Matrix([ [rho11, rho12], [rho21, rho22]]) The density matrix can be made explicitly hermitian >>> define_density_matrix(2, explicitly_hermitian=True) Matrix([ [rho11, conjugate(rho21)], [rho21, rho22]]) or normalized >>> define_density_matrix(2, normalized=True) Matrix([ [-rho22 + 1, rho12], [ rho21, rho22]]) or it can be made an explicit function of given variables >>> from sympy import symbols >>> t, z = symbols("t, z", positive=True) >>> define_density_matrix(2, variables=[t, z]) Matrix([ [rho11(t, z), rho12(t, z)], [rho21(t, z), rho22(t, z)]]) """ if Ne > 9: comma = "," name = r"\rho" open_brace = "_{" close_brace = "}" else: comma = "" name = "rho" open_brace = "" close_brace = "" rho = [] for i in range(Ne): row_rho = [] for j in range(Ne): if i == j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables, positive=True)] elif i > j: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] else: if explicitly_hermitian: row_rho += [conjugate(define_symbol(name, open_brace, comma, j, i, close_brace, variables))] else: row_rho += [define_symbol(name, open_brace, comma, i, j, close_brace, variables)] rho += [row_rho] if normalized: rho11 = 1-sum([rho[i][i] for i in range(1, Ne)]) rho[0][0] = rho11 rho = Matrix(rho) return rho
[ "def", "define_density_matrix", "(", "Ne", ",", "explicitly_hermitian", "=", "False", ",", "normalized", "=", "False", ",", "variables", "=", "None", ")", ":", "if", "Ne", ">", "9", ":", "comma", "=", "\",\"", "name", "=", "r\"\\rho\"", "open_brace", "=", ...
r"""Return a symbolic density matrix. The arguments are Ne (integer): The number of atomic states. explicitly_hermitian (boolean): Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$ normalized (boolean): Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$ A very simple example: >>> define_density_matrix(2) Matrix([ [rho11, rho12], [rho21, rho22]]) The density matrix can be made explicitly hermitian >>> define_density_matrix(2, explicitly_hermitian=True) Matrix([ [rho11, conjugate(rho21)], [rho21, rho22]]) or normalized >>> define_density_matrix(2, normalized=True) Matrix([ [-rho22 + 1, rho12], [ rho21, rho22]]) or it can be made an explicit function of given variables >>> from sympy import symbols >>> t, z = symbols("t, z", positive=True) >>> define_density_matrix(2, variables=[t, z]) Matrix([ [rho11(t, z), rho12(t, z)], [rho21(t, z), rho22(t, z)]])
[ "r", "Return", "a", "symbolic", "density", "matrix", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/symbolic.py#L72-L150
train
50,453
oscarlazoarjona/fast
build/lib/fast/symbolic.py
define_laser_variables
def define_laser_variables(Nl, real_amplitudes=False, variables=None): r"""Return the amplitudes and frequencies of Nl fields. >>> E0, omega_laser = define_laser_variables(2) >>> E0, omega_laser ([E_0^1, E_0^2], [varpi_1, varpi_2]) The amplitudes are complex by default: >>> conjugate(E0[0]) conjugate(E_0^1) But they can optionally be made real: >>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True) >>> conjugate(E0[0]) E_0^1 They can also be made explicit functions of given variables: >>> from sympy import symbols >>> t, z = symbols("t, z", real=True) >>> E0, omega_laser = define_laser_variables(2, variables=[t, z]) >>> E0 [E_0^1(t, z), E_0^2(t, z)] """ if variables is None: E0 = [Symbol(r"E_0^"+str(l+1), real=real_amplitudes) for l in range(Nl)] else: E0 = [Function(r"E_0^"+str(l+1), real=real_amplitudes)(*variables) for l in range(Nl)] omega_laser = [Symbol(r"varpi_"+str(l+1), positive=True) for l in range(Nl)] return E0, omega_laser
python
def define_laser_variables(Nl, real_amplitudes=False, variables=None): r"""Return the amplitudes and frequencies of Nl fields. >>> E0, omega_laser = define_laser_variables(2) >>> E0, omega_laser ([E_0^1, E_0^2], [varpi_1, varpi_2]) The amplitudes are complex by default: >>> conjugate(E0[0]) conjugate(E_0^1) But they can optionally be made real: >>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True) >>> conjugate(E0[0]) E_0^1 They can also be made explicit functions of given variables: >>> from sympy import symbols >>> t, z = symbols("t, z", real=True) >>> E0, omega_laser = define_laser_variables(2, variables=[t, z]) >>> E0 [E_0^1(t, z), E_0^2(t, z)] """ if variables is None: E0 = [Symbol(r"E_0^"+str(l+1), real=real_amplitudes) for l in range(Nl)] else: E0 = [Function(r"E_0^"+str(l+1), real=real_amplitudes)(*variables) for l in range(Nl)] omega_laser = [Symbol(r"varpi_"+str(l+1), positive=True) for l in range(Nl)] return E0, omega_laser
[ "def", "define_laser_variables", "(", "Nl", ",", "real_amplitudes", "=", "False", ",", "variables", "=", "None", ")", ":", "if", "variables", "is", "None", ":", "E0", "=", "[", "Symbol", "(", "r\"E_0^\"", "+", "str", "(", "l", "+", "1", ")", ",", "re...
r"""Return the amplitudes and frequencies of Nl fields. >>> E0, omega_laser = define_laser_variables(2) >>> E0, omega_laser ([E_0^1, E_0^2], [varpi_1, varpi_2]) The amplitudes are complex by default: >>> conjugate(E0[0]) conjugate(E_0^1) But they can optionally be made real: >>> E0, omega_laser = define_laser_variables(2, real_amplitudes=True) >>> conjugate(E0[0]) E_0^1 They can also be made explicit functions of given variables: >>> from sympy import symbols >>> t, z = symbols("t, z", real=True) >>> E0, omega_laser = define_laser_variables(2, variables=[t, z]) >>> E0 [E_0^1(t, z), E_0^2(t, z)]
[ "r", "Return", "the", "amplitudes", "and", "frequencies", "of", "Nl", "fields", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/symbolic.py#L153-L186
train
50,454
oscarlazoarjona/fast
fast/graphic.py
complex_matrix_plot
def complex_matrix_plot(A, logA=False, normalize=False, plot=True, **kwds): r"""A function to plot complex matrices.""" N = len(A[0]) if logA: Anew = [] for i in range(N): row = [] for j in range(N): if A[i][j] != 0: row += [log(log(A[i][j]))] else: row += [0.0] Anew += [row] A = Anew[:] # A=[[log(A[i][j]) for j in range(N)] for i in range(N)] if normalize: norm = 1 for i in range(N): for j in range(N): if abs(A[i][j]) > norm: norm = abs(A[i][j]) A = [[A[i][j]/norm for j in range(N)]for i in range(N)] # print A color_matrix = [] lmax = -1 for i in range(N): row = [] for j in range(N): rgb, l = complex_to_color(A[i][j]) row += [rgb] if l > lmax: lmax = l color_matrix += [row] if normalize: color_matrix = [[tuple([k/lmax for k in color_matrix[i][j]]) for j in range(N)] for i in range(N)] if plot: pyplot.imshow(color_matrix, interpolation='none') pyplot.savefig('a.png', bbox_inches='tight') pyplot.close('all') else: return color_matrix
python
def complex_matrix_plot(A, logA=False, normalize=False, plot=True, **kwds): r"""A function to plot complex matrices.""" N = len(A[0]) if logA: Anew = [] for i in range(N): row = [] for j in range(N): if A[i][j] != 0: row += [log(log(A[i][j]))] else: row += [0.0] Anew += [row] A = Anew[:] # A=[[log(A[i][j]) for j in range(N)] for i in range(N)] if normalize: norm = 1 for i in range(N): for j in range(N): if abs(A[i][j]) > norm: norm = abs(A[i][j]) A = [[A[i][j]/norm for j in range(N)]for i in range(N)] # print A color_matrix = [] lmax = -1 for i in range(N): row = [] for j in range(N): rgb, l = complex_to_color(A[i][j]) row += [rgb] if l > lmax: lmax = l color_matrix += [row] if normalize: color_matrix = [[tuple([k/lmax for k in color_matrix[i][j]]) for j in range(N)] for i in range(N)] if plot: pyplot.imshow(color_matrix, interpolation='none') pyplot.savefig('a.png', bbox_inches='tight') pyplot.close('all') else: return color_matrix
[ "def", "complex_matrix_plot", "(", "A", ",", "logA", "=", "False", ",", "normalize", "=", "False", ",", "plot", "=", "True", ",", "*", "*", "kwds", ")", ":", "N", "=", "len", "(", "A", "[", "0", "]", ")", "if", "logA", ":", "Anew", "=", "[", ...
r"""A function to plot complex matrices.
[ "r", "A", "function", "to", "plot", "complex", "matrices", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L64-L109
train
50,455
oscarlazoarjona/fast
fast/graphic.py
bar_chart_mf
def bar_chart_mf(data, path_name): """Make a bar chart for data on MF quantities.""" N = len(data) ind = np.arange(N) # the x locations for the groups width = 0.8 # the width of the bars fig, ax = pyplot.subplots() rects1 = ax.bar(ind, data, width, color='g') # add some text for labels, title and axes ticks ax.set_ylabel('Population') ax.set_xticks(ind+width/2) labs = ['m='+str(i) for i in range(-N/2+1, N/2+1)] ax.set_xticklabels(labs) def autolabel(rects): # attach some text labels for rect in rects: rect.get_height() autolabel(rects1) pyplot.savefig(path_name) pyplot.close()
python
def bar_chart_mf(data, path_name): """Make a bar chart for data on MF quantities.""" N = len(data) ind = np.arange(N) # the x locations for the groups width = 0.8 # the width of the bars fig, ax = pyplot.subplots() rects1 = ax.bar(ind, data, width, color='g') # add some text for labels, title and axes ticks ax.set_ylabel('Population') ax.set_xticks(ind+width/2) labs = ['m='+str(i) for i in range(-N/2+1, N/2+1)] ax.set_xticklabels(labs) def autolabel(rects): # attach some text labels for rect in rects: rect.get_height() autolabel(rects1) pyplot.savefig(path_name) pyplot.close()
[ "def", "bar_chart_mf", "(", "data", ",", "path_name", ")", ":", "N", "=", "len", "(", "data", ")", "ind", "=", "np", ".", "arange", "(", "N", ")", "# the x locations for the groups\r", "width", "=", "0.8", "# the width of the bars\r", "fig", ",", "ax", "="...
Make a bar chart for data on MF quantities.
[ "Make", "a", "bar", "chart", "for", "data", "on", "MF", "quantities", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L476-L499
train
50,456
oscarlazoarjona/fast
fast/graphic.py
draw_plane_wave_3d
def draw_plane_wave_3d(ax, beam, dist_to_center=0): """Draw the polarization of a plane wave.""" Ex = []; Ey = []; Ez = [] k = [cos(beam.phi)*sin(beam.theta), sin(beam.phi)*sin(beam.theta), cos(beam.theta)] kx, ky, kz = k Nt = 1000 tstep = 7*pi/4/(Nt-1) alpha = beam.alpha beta = beam.beta phi = beam.phi theta = beam.theta omega = 1 for i in range(Nt): t = i*tstep Ex += [(cos(2*alpha)*cos(phi)*cos(theta) - sin(2*alpha)*sin(phi))*cos(omega*t)*cos(2*beta) - (cos(phi)*cos(theta)*sin(2*alpha) + cos(2*alpha)*sin(phi))*sin(omega*t)*sin(2*beta) - dist_to_center*kx] Ey += [(cos(2*alpha)*cos(theta)*sin(phi) + cos(phi)*sin(2*alpha))*cos(omega*t)*cos(2*beta) - (cos(theta)*sin(2*alpha)*sin(phi) - cos(2*alpha)*cos(phi))*sin(omega*t)*sin(2*beta) - dist_to_center*ky] Ez += [-cos(omega*t)*cos(2*alpha)*cos(2*beta)*sin(theta) + sin(omega*t)*sin(2*alpha)*sin(2*beta)*sin(theta) - dist_to_center*kz] ax.plot(Ex, Ey, Ez, beam.color+'-') ff = dist_to_center-1.0 arrx = [-kx*dist_to_center, -kx*ff] arry = [-ky*dist_to_center, -ky*ff] arrz = [-kz*dist_to_center, -kz*ff] arrow = Arrow3D(arrx, arry, arrz, mutation_scale=20, lw=1, arrowstyle="-|>", color=beam.color) ax.add_artist(arrow) ax.plot([Ex[-1]], [Ey[-1]], [Ez[-1]], '.', markersize=8, color=beam.color)
python
def draw_plane_wave_3d(ax, beam, dist_to_center=0): """Draw the polarization of a plane wave.""" Ex = []; Ey = []; Ez = [] k = [cos(beam.phi)*sin(beam.theta), sin(beam.phi)*sin(beam.theta), cos(beam.theta)] kx, ky, kz = k Nt = 1000 tstep = 7*pi/4/(Nt-1) alpha = beam.alpha beta = beam.beta phi = beam.phi theta = beam.theta omega = 1 for i in range(Nt): t = i*tstep Ex += [(cos(2*alpha)*cos(phi)*cos(theta) - sin(2*alpha)*sin(phi))*cos(omega*t)*cos(2*beta) - (cos(phi)*cos(theta)*sin(2*alpha) + cos(2*alpha)*sin(phi))*sin(omega*t)*sin(2*beta) - dist_to_center*kx] Ey += [(cos(2*alpha)*cos(theta)*sin(phi) + cos(phi)*sin(2*alpha))*cos(omega*t)*cos(2*beta) - (cos(theta)*sin(2*alpha)*sin(phi) - cos(2*alpha)*cos(phi))*sin(omega*t)*sin(2*beta) - dist_to_center*ky] Ez += [-cos(omega*t)*cos(2*alpha)*cos(2*beta)*sin(theta) + sin(omega*t)*sin(2*alpha)*sin(2*beta)*sin(theta) - dist_to_center*kz] ax.plot(Ex, Ey, Ez, beam.color+'-') ff = dist_to_center-1.0 arrx = [-kx*dist_to_center, -kx*ff] arry = [-ky*dist_to_center, -ky*ff] arrz = [-kz*dist_to_center, -kz*ff] arrow = Arrow3D(arrx, arry, arrz, mutation_scale=20, lw=1, arrowstyle="-|>", color=beam.color) ax.add_artist(arrow) ax.plot([Ex[-1]], [Ey[-1]], [Ez[-1]], '.', markersize=8, color=beam.color)
[ "def", "draw_plane_wave_3d", "(", "ax", ",", "beam", ",", "dist_to_center", "=", "0", ")", ":", "Ex", "=", "[", "]", "Ey", "=", "[", "]", "Ez", "=", "[", "]", "k", "=", "[", "cos", "(", "beam", ".", "phi", ")", "*", "sin", "(", "beam", ".", ...
Draw the polarization of a plane wave.
[ "Draw", "the", "polarization", "of", "a", "plane", "wave", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L528-L574
train
50,457
oscarlazoarjona/fast
fast/graphic.py
draw_lasers_3d
def draw_lasers_3d(ax, lasers, name=None, distances=None, lim=None): """Draw MOT lasers in 3d.""" if distances is None: distances = [1.0 for i in range(len(lasers))] for i in range(len(lasers)): if type(lasers[i]) == PlaneWave: draw_plane_wave_3d(ax, lasers[i], distances[i]) elif type(lasers[i]) == MotField: draw_mot_field_3d(ax, lasers[i], distances[i]) ax.set_xlabel(r"$x$", fontsize=20) ax.set_ylabel(r"$y$", fontsize=20) ax.set_zlabel(r"$z$", fontsize=20) if lim is None: lim = sqrt(2.0) ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.set_aspect("equal") if name is not None: pyplot.savefig(name, bbox_inches='tight')
python
def draw_lasers_3d(ax, lasers, name=None, distances=None, lim=None): """Draw MOT lasers in 3d.""" if distances is None: distances = [1.0 for i in range(len(lasers))] for i in range(len(lasers)): if type(lasers[i]) == PlaneWave: draw_plane_wave_3d(ax, lasers[i], distances[i]) elif type(lasers[i]) == MotField: draw_mot_field_3d(ax, lasers[i], distances[i]) ax.set_xlabel(r"$x$", fontsize=20) ax.set_ylabel(r"$y$", fontsize=20) ax.set_zlabel(r"$z$", fontsize=20) if lim is None: lim = sqrt(2.0) ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.set_aspect("equal") if name is not None: pyplot.savefig(name, bbox_inches='tight')
[ "def", "draw_lasers_3d", "(", "ax", ",", "lasers", ",", "name", "=", "None", ",", "distances", "=", "None", ",", "lim", "=", "None", ")", ":", "if", "distances", "is", "None", ":", "distances", "=", "[", "1.0", "for", "i", "in", "range", "(", "len"...
Draw MOT lasers in 3d.
[ "Draw", "MOT", "lasers", "in", "3d", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L583-L604
train
50,458
oscarlazoarjona/fast
fast/graphic.py
rotate_and_traslate
def rotate_and_traslate(cur, alpha, v0): r"""Rotate and translate a curve.""" if len(cur) > 2 or (type(cur[0][0]) in [list, tuple]): cur_list = cur[:] for i in range(len(cur_list)): curi = cur_list[i] curi = rotate_and_traslate(curi, alpha, v0) cur_list[i] = curi return cur_list else: x0, y0 = cur rot = np.matrix([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]]) xn = []; yn = [] for i in range(len(x0)): v = np.matrix([[x0[i]], [y0[i]]]) vi = np.dot(rot, v) xn += [float(vi[0][0])+v0[0]]; yn += [float(vi[1][0])+v0[1]] return xn, yn
python
def rotate_and_traslate(cur, alpha, v0): r"""Rotate and translate a curve.""" if len(cur) > 2 or (type(cur[0][0]) in [list, tuple]): cur_list = cur[:] for i in range(len(cur_list)): curi = cur_list[i] curi = rotate_and_traslate(curi, alpha, v0) cur_list[i] = curi return cur_list else: x0, y0 = cur rot = np.matrix([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]]) xn = []; yn = [] for i in range(len(x0)): v = np.matrix([[x0[i]], [y0[i]]]) vi = np.dot(rot, v) xn += [float(vi[0][0])+v0[0]]; yn += [float(vi[1][0])+v0[1]] return xn, yn
[ "def", "rotate_and_traslate", "(", "cur", ",", "alpha", ",", "v0", ")", ":", "if", "len", "(", "cur", ")", ">", "2", "or", "(", "type", "(", "cur", "[", "0", "]", "[", "0", "]", ")", "in", "[", "list", ",", "tuple", "]", ")", ":", "cur_list",...
r"""Rotate and translate a curve.
[ "r", "Rotate", "and", "translate", "a", "curve", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L736-L755
train
50,459
oscarlazoarjona/fast
fast/graphic.py
mirror
def mirror(ax, p0, alpha=0, size=2.54, width=0.5, format=None): r"""Draw a mirror.""" if format is None: format = 'k-' x0 = [size/2, -size/2, -size/2, size/2, size/2] y0 = [0, 0, -width, -width, 0] x1 = [size/2, size/2-width]; y1 = [0, -width] x2 = [-size/2+width, -size/2]; y2 = [0, -width] x3 = [(size/2-size/2+width)/2, (size/2-width-size/2)/2]; y3 = [0, -width] cur_list = [(x0, y0), (x1, y1), (x2, y2), (x3, y3)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format)
python
def mirror(ax, p0, alpha=0, size=2.54, width=0.5, format=None): r"""Draw a mirror.""" if format is None: format = 'k-' x0 = [size/2, -size/2, -size/2, size/2, size/2] y0 = [0, 0, -width, -width, 0] x1 = [size/2, size/2-width]; y1 = [0, -width] x2 = [-size/2+width, -size/2]; y2 = [0, -width] x3 = [(size/2-size/2+width)/2, (size/2-width-size/2)/2]; y3 = [0, -width] cur_list = [(x0, y0), (x1, y1), (x2, y2), (x3, y3)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format)
[ "def", "mirror", "(", "ax", ",", "p0", ",", "alpha", "=", "0", ",", "size", "=", "2.54", ",", "width", "=", "0.5", ",", "format", "=", "None", ")", ":", "if", "format", "is", "None", ":", "format", "=", "'k-'", "x0", "=", "[", "size", "/", "2...
r"""Draw a mirror.
[ "r", "Draw", "a", "mirror", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L758-L771
train
50,460
oscarlazoarjona/fast
fast/graphic.py
eye
def eye(ax, p0, size=1.0, alpha=0, format=None, **kwds): r"""Draw an eye.""" if format is None: format = 'k-' N = 100 ang0 = pi-3*pi/16; angf = pi+3*pi/16 angstep = (angf-ang0)/(N-1) x1 = [size*(cos(i*angstep+ang0)+1) for i in range(N)] y1 = [size*sin(i*angstep+ang0) for i in range(N)] ang2 = ang0+pi/16 x2 = [size, size*(1.2*cos(ang2)+1)] y2 = [0, 1.2*size*(sin(ang2))] y3 = [0, -1.2*size*(sin(ang2))] N = 100 ang0 = ang2; angf = ang2+4*pi/16 angstep = (angf-ang0)/(N-1) x4 = [size*(0.85*cos(i*angstep+ang0)+1) for i in range(N)] y4 = [size*0.85*sin(i*angstep+ang0) for i in range(N)] cur_list = [(x1, y1), (x2, y2), (x2, y3), (x4, y4)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
python
def eye(ax, p0, size=1.0, alpha=0, format=None, **kwds): r"""Draw an eye.""" if format is None: format = 'k-' N = 100 ang0 = pi-3*pi/16; angf = pi+3*pi/16 angstep = (angf-ang0)/(N-1) x1 = [size*(cos(i*angstep+ang0)+1) for i in range(N)] y1 = [size*sin(i*angstep+ang0) for i in range(N)] ang2 = ang0+pi/16 x2 = [size, size*(1.2*cos(ang2)+1)] y2 = [0, 1.2*size*(sin(ang2))] y3 = [0, -1.2*size*(sin(ang2))] N = 100 ang0 = ang2; angf = ang2+4*pi/16 angstep = (angf-ang0)/(N-1) x4 = [size*(0.85*cos(i*angstep+ang0)+1) for i in range(N)] y4 = [size*0.85*sin(i*angstep+ang0) for i in range(N)] cur_list = [(x1, y1), (x2, y2), (x2, y3), (x4, y4)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
[ "def", "eye", "(", "ax", ",", "p0", ",", "size", "=", "1.0", ",", "alpha", "=", "0", ",", "format", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "format", "is", "None", ":", "format", "=", "'k-'", "N", "=", "100", "ang0", "=", "pi", ...
r"""Draw an eye.
[ "r", "Draw", "an", "eye", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L853-L877
train
50,461
oscarlazoarjona/fast
fast/graphic.py
beam_splitter
def beam_splitter(ax, p0, size=2.54, alpha=0, format=None, **kwds): r"""Draw a beam splitter.""" if format is None: format = 'k-' a = size/2 x0 = [a, -a, -a, a, a, -a] y0 = [a, a, -a, -a, a, -a] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
python
def beam_splitter(ax, p0, size=2.54, alpha=0, format=None, **kwds): r"""Draw a beam splitter.""" if format is None: format = 'k-' a = size/2 x0 = [a, -a, -a, a, a, -a] y0 = [a, a, -a, -a, a, -a] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
[ "def", "beam_splitter", "(", "ax", ",", "p0", ",", "size", "=", "2.54", ",", "alpha", "=", "0", ",", "format", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "format", "is", "None", ":", "format", "=", "'k-'", "a", "=", "size", "/", "2", ...
r"""Draw a beam splitter.
[ "r", "Draw", "a", "beam", "splitter", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L880-L890
train
50,462
oscarlazoarjona/fast
fast/graphic.py
simple_beam_splitter
def simple_beam_splitter(ax, p0, size=2.54, width=0.1, alpha=0, format=None, **kwds): r"""Draw a simple beam splitter.""" if format is None: format = 'k-' a = size/2 b = width/2 x0 = [a, -a, -a, a, a] y0 = [b, b, -b, -b, b] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
python
def simple_beam_splitter(ax, p0, size=2.54, width=0.1, alpha=0, format=None, **kwds): r"""Draw a simple beam splitter.""" if format is None: format = 'k-' a = size/2 b = width/2 x0 = [a, -a, -a, a, a] y0 = [b, b, -b, -b, b] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
[ "def", "simple_beam_splitter", "(", "ax", ",", "p0", ",", "size", "=", "2.54", ",", "width", "=", "0.1", ",", "alpha", "=", "0", ",", "format", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "format", "is", "None", ":", "format", "=", "'k-'"...
r"""Draw a simple beam splitter.
[ "r", "Draw", "a", "simple", "beam", "splitter", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L925-L937
train
50,463
oscarlazoarjona/fast
fast/graphic.py
draw_arith
def draw_arith(ax, p0, size=1, alpha=0, arith=None, format=None, fontsize=10, **kwds): r"""Draw an arithmetic operator.""" if format is None: format = 'k-' a = size/2.0 x0 = [0, 2.5*a, 0, 0] y0 = [a, 0, -a, a] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds) if arith is not None: pyplot.text(p0[0]+0.75*a, p0[1], arith, horizontalalignment='center', verticalalignment='center', fontsize=fontsize)
python
def draw_arith(ax, p0, size=1, alpha=0, arith=None, format=None, fontsize=10, **kwds): r"""Draw an arithmetic operator.""" if format is None: format = 'k-' a = size/2.0 x0 = [0, 2.5*a, 0, 0] y0 = [a, 0, -a, a] cur_list = [(x0, y0)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds) if arith is not None: pyplot.text(p0[0]+0.75*a, p0[1], arith, horizontalalignment='center', verticalalignment='center', fontsize=fontsize)
[ "def", "draw_arith", "(", "ax", ",", "p0", ",", "size", "=", "1", ",", "alpha", "=", "0", ",", "arith", "=", "None", ",", "format", "=", "None", ",", "fontsize", "=", "10", ",", "*", "*", "kwds", ")", ":", "if", "format", "is", "None", ":", "...
r"""Draw an arithmetic operator.
[ "r", "Draw", "an", "arithmetic", "operator", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L1018-L1033
train
50,464
oscarlazoarjona/fast
fast/graphic.py
draw_state
def draw_state(ax, p, text='', l=0.5, alignment='left', label_displacement=1.0, fontsize=25, atoms=None, atoms_h=0.125, atoms_size=5, **kwds): r"""Draw a quantum state for energy level diagrams.""" ax.plot([p[0]-l/2.0, p[0]+l/2.0], [p[1], p[1]], color='black', **kwds) if text != '': if alignment == 'left': ax.text(p[0] - l/2.0 - label_displacement, p[1], text, horizontalalignment='right', verticalalignment='center', color='black', fontsize=fontsize) elif alignment == 'right': ax.text(p[0] + l/2.0 + label_displacement, p[1], text, horizontalalignment='left', color='black', fontsize=fontsize) # We draw atoms. if atoms is not None: atoms_x = np.linspace(p[0]-l*0.5, p[0]+l*0.5, atoms) atoms_y = [p[1] + atoms_h for i in range(atoms)] # print l, atoms_x ax.plot(atoms_x, atoms_y, "ko", ms=atoms_size)
python
def draw_state(ax, p, text='', l=0.5, alignment='left', label_displacement=1.0, fontsize=25, atoms=None, atoms_h=0.125, atoms_size=5, **kwds): r"""Draw a quantum state for energy level diagrams.""" ax.plot([p[0]-l/2.0, p[0]+l/2.0], [p[1], p[1]], color='black', **kwds) if text != '': if alignment == 'left': ax.text(p[0] - l/2.0 - label_displacement, p[1], text, horizontalalignment='right', verticalalignment='center', color='black', fontsize=fontsize) elif alignment == 'right': ax.text(p[0] + l/2.0 + label_displacement, p[1], text, horizontalalignment='left', color='black', fontsize=fontsize) # We draw atoms. if atoms is not None: atoms_x = np.linspace(p[0]-l*0.5, p[0]+l*0.5, atoms) atoms_y = [p[1] + atoms_h for i in range(atoms)] # print l, atoms_x ax.plot(atoms_x, atoms_y, "ko", ms=atoms_size)
[ "def", "draw_state", "(", "ax", ",", "p", ",", "text", "=", "''", ",", "l", "=", "0.5", ",", "alignment", "=", "'left'", ",", "label_displacement", "=", "1.0", ",", "fontsize", "=", "25", ",", "atoms", "=", "None", ",", "atoms_h", "=", "0.125", ","...
r"""Draw a quantum state for energy level diagrams.
[ "r", "Draw", "a", "quantum", "state", "for", "energy", "level", "diagrams", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L1041-L1061
train
50,465
oscarlazoarjona/fast
fast/graphic.py
decay
def decay(ax, p0, pf, A, n, format=None, **kwds): r"""Draw a spontaneous decay as a wavy line.""" if format is None: format = 'k-' T = sqrt((p0[0]-pf[0])**2+(p0[1]-pf[1])**2) alpha = atan2(pf[1]-p0[1], pf[0]-p0[0]) x = [i*T/400.0 for i in range(401)] y = [A*sin(xi * 2*pi*n/T) for xi in x] cur_list = [(x, y)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
python
def decay(ax, p0, pf, A, n, format=None, **kwds): r"""Draw a spontaneous decay as a wavy line.""" if format is None: format = 'k-' T = sqrt((p0[0]-pf[0])**2+(p0[1]-pf[1])**2) alpha = atan2(pf[1]-p0[1], pf[0]-p0[0]) x = [i*T/400.0 for i in range(401)] y = [A*sin(xi * 2*pi*n/T) for xi in x] cur_list = [(x, y)] cur_list = rotate_and_traslate(cur_list, alpha, p0) for curi in cur_list: ax.plot(curi[0], curi[1], format, **kwds)
[ "def", "decay", "(", "ax", ",", "p0", ",", "pf", ",", "A", ",", "n", ",", "format", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "format", "is", "None", ":", "format", "=", "'k-'", "T", "=", "sqrt", "(", "(", "p0", "[", "0", "]", "...
r"""Draw a spontaneous decay as a wavy line.
[ "r", "Draw", "a", "spontaneous", "decay", "as", "a", "wavy", "line", "." ]
3e5400672af2a7b7cc616e7f4aa10d7672720222
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L1159-L1172
train
50,466
minrk/wurlitzer
wurlitzer.py
dup2
def dup2(a, b, timeout=3): """Like os.dup2, but retry on EBUSY""" dup_err = None # give FDs 3 seconds to not be busy anymore for i in range(int(10 * timeout)): try: return os.dup2(a, b) except OSError as e: dup_err = e if e.errno == errno.EBUSY: time.sleep(0.1) else: raise if dup_err: raise dup_err
python
def dup2(a, b, timeout=3): """Like os.dup2, but retry on EBUSY""" dup_err = None # give FDs 3 seconds to not be busy anymore for i in range(int(10 * timeout)): try: return os.dup2(a, b) except OSError as e: dup_err = e if e.errno == errno.EBUSY: time.sleep(0.1) else: raise if dup_err: raise dup_err
[ "def", "dup2", "(", "a", ",", "b", ",", "timeout", "=", "3", ")", ":", "dup_err", "=", "None", "# give FDs 3 seconds to not be busy anymore", "for", "i", "in", "range", "(", "int", "(", "10", "*", "timeout", ")", ")", ":", "try", ":", "return", "os", ...
Like os.dup2, but retry on EBUSY
[ "Like", "os", ".", "dup2", "but", "retry", "on", "EBUSY" ]
088bb9957396afea21a88b35999267a9c6e239d5
https://github.com/minrk/wurlitzer/blob/088bb9957396afea21a88b35999267a9c6e239d5/wurlitzer.py#L52-L66
train
50,467
sphinx-contrib/spelling
sphinxcontrib/spelling/checker.py
SpellingChecker.push_filters
def push_filters(self, new_filters): """Add a filter to the tokenizer chain. """ t = self.tokenizer for f in new_filters: t = f(t) self.tokenizer = t
python
def push_filters(self, new_filters): """Add a filter to the tokenizer chain. """ t = self.tokenizer for f in new_filters: t = f(t) self.tokenizer = t
[ "def", "push_filters", "(", "self", ",", "new_filters", ")", ":", "t", "=", "self", ".", "tokenizer", "for", "f", "in", "new_filters", ":", "t", "=", "f", "(", "t", ")", "self", ".", "tokenizer", "=", "t" ]
Add a filter to the tokenizer chain.
[ "Add", "a", "filter", "to", "the", "tokenizer", "chain", "." ]
3108cd86b5935f458ec80e87f8e37f924725d15f
https://github.com/sphinx-contrib/spelling/blob/3108cd86b5935f458ec80e87f8e37f924725d15f/sphinxcontrib/spelling/checker.py#L28-L34
train
50,468
sphinx-contrib/spelling
sphinxcontrib/spelling/checker.py
SpellingChecker.check
def check(self, text): """Yields bad words and suggested alternate spellings. """ for word, pos in self.tokenizer(text): correct = self.dictionary.check(word) if correct: continue yield word, self.dictionary.suggest(word) if self.suggest else [] return
python
def check(self, text): """Yields bad words and suggested alternate spellings. """ for word, pos in self.tokenizer(text): correct = self.dictionary.check(word) if correct: continue yield word, self.dictionary.suggest(word) if self.suggest else [] return
[ "def", "check", "(", "self", ",", "text", ")", ":", "for", "word", ",", "pos", "in", "self", ".", "tokenizer", "(", "text", ")", ":", "correct", "=", "self", ".", "dictionary", ".", "check", "(", "word", ")", "if", "correct", ":", "continue", "yiel...
Yields bad words and suggested alternate spellings.
[ "Yields", "bad", "words", "and", "suggested", "alternate", "spellings", "." ]
3108cd86b5935f458ec80e87f8e37f924725d15f
https://github.com/sphinx-contrib/spelling/blob/3108cd86b5935f458ec80e87f8e37f924725d15f/sphinxcontrib/spelling/checker.py#L41-L49
train
50,469
Yelp/service_configuration_lib
service_configuration_lib/__init__.py
all_nodes_that_receive
def all_nodes_that_receive(service, service_configuration=None, run_only=False, deploy_to_only=False): """If run_only, returns only the services that are in the runs_on list. If deploy_to_only, returns only the services in the deployed_to list. If neither, both are returned, duplicates stripped. Results are always sorted. """ assert not (run_only and deploy_to_only) if service_configuration is None: service_configuration = read_services_configuration() runs_on = service_configuration[service]['runs_on'] deployed_to = service_configuration[service].get('deployed_to') if deployed_to is None: deployed_to = [] if run_only: result = runs_on elif deploy_to_only: result = deployed_to else: result = set(runs_on) | set(deployed_to) return list(sorted(result))
python
def all_nodes_that_receive(service, service_configuration=None, run_only=False, deploy_to_only=False): """If run_only, returns only the services that are in the runs_on list. If deploy_to_only, returns only the services in the deployed_to list. If neither, both are returned, duplicates stripped. Results are always sorted. """ assert not (run_only and deploy_to_only) if service_configuration is None: service_configuration = read_services_configuration() runs_on = service_configuration[service]['runs_on'] deployed_to = service_configuration[service].get('deployed_to') if deployed_to is None: deployed_to = [] if run_only: result = runs_on elif deploy_to_only: result = deployed_to else: result = set(runs_on) | set(deployed_to) return list(sorted(result))
[ "def", "all_nodes_that_receive", "(", "service", ",", "service_configuration", "=", "None", ",", "run_only", "=", "False", ",", "deploy_to_only", "=", "False", ")", ":", "assert", "not", "(", "run_only", "and", "deploy_to_only", ")", "if", "service_configuration",...
If run_only, returns only the services that are in the runs_on list. If deploy_to_only, returns only the services in the deployed_to list. If neither, both are returned, duplicates stripped. Results are always sorted.
[ "If", "run_only", "returns", "only", "the", "services", "that", "are", "in", "the", "runs_on", "list", ".", "If", "deploy_to_only", "returns", "only", "the", "services", "in", "the", "deployed_to", "list", ".", "If", "neither", "both", "are", "returned", "du...
83ac2872f95dd204e9f83ec95b4296a9501bf82d
https://github.com/Yelp/service_configuration_lib/blob/83ac2872f95dd204e9f83ec95b4296a9501bf82d/service_configuration_lib/__init__.py#L246-L268
train
50,470
catherinedevlin/ddl-generator
ddlgenerator/typehelpers.py
precision_and_scale
def precision_and_scale(x): """ From a float, decide what precision and scale are needed to represent it. >>> precision_and_scale(54.2) (3, 1) >>> precision_and_scale(9) (1, 0) Thanks to Mark Ransom, http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python """ if isinstance(x, Decimal): precision = len(x.as_tuple().digits) scale = -1 * x.as_tuple().exponent if scale < 0: precision -= scale scale = 0 return (precision, scale) max_digits = 14 int_part = int(abs(x)) magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1 if magnitude >= max_digits: return (magnitude, 0) frac_part = abs(x) - int_part multiplier = 10 ** (max_digits - magnitude) frac_digits = multiplier + int(multiplier * frac_part + 0.5) while frac_digits % 10 == 0: frac_digits /= 10 scale = int(math.log10(frac_digits)) return (magnitude + scale, scale)
python
def precision_and_scale(x): """ From a float, decide what precision and scale are needed to represent it. >>> precision_and_scale(54.2) (3, 1) >>> precision_and_scale(9) (1, 0) Thanks to Mark Ransom, http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python """ if isinstance(x, Decimal): precision = len(x.as_tuple().digits) scale = -1 * x.as_tuple().exponent if scale < 0: precision -= scale scale = 0 return (precision, scale) max_digits = 14 int_part = int(abs(x)) magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1 if magnitude >= max_digits: return (magnitude, 0) frac_part = abs(x) - int_part multiplier = 10 ** (max_digits - magnitude) frac_digits = multiplier + int(multiplier * frac_part + 0.5) while frac_digits % 10 == 0: frac_digits /= 10 scale = int(math.log10(frac_digits)) return (magnitude + scale, scale)
[ "def", "precision_and_scale", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "Decimal", ")", ":", "precision", "=", "len", "(", "x", ".", "as_tuple", "(", ")", ".", "digits", ")", "scale", "=", "-", "1", "*", "x", ".", "as_tuple", "(", ")...
From a float, decide what precision and scale are needed to represent it. >>> precision_and_scale(54.2) (3, 1) >>> precision_and_scale(9) (1, 0) Thanks to Mark Ransom, http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
[ "From", "a", "float", "decide", "what", "precision", "and", "scale", "are", "needed", "to", "represent", "it", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L17-L47
train
50,471
catherinedevlin/ddl-generator
ddlgenerator/typehelpers.py
best_representative
def best_representative(d1, d2): """ Given two objects each coerced to the most specific type possible, return the one of the least restrictive type. >>> best_representative(Decimal('-37.5'), Decimal('0.9999')) Decimal('-99.9999') >>> best_representative(None, Decimal('6.1')) Decimal('6.1') >>> best_representative(311920, '48-49') '48-490' >>> best_representative(6, 'foo') 'foo' >>> best_representative(Decimal('4.95'), Decimal('6.1')) Decimal('9.99') >>> best_representative(Decimal('-1.9'), Decimal('6.1')) Decimal('-9.9') """ if hasattr(d2, 'strip') and not d2.strip(): return d1 if d1 is None: return d2 elif d2 is None: return d1 preference = (datetime.datetime, bool, int, Decimal, float, str) worst_pref = 0 worst = '' for coerced in (d1, d2): pref = preference.index(type(coerced)) if pref > worst_pref: worst_pref = pref worst = set_worst(worst, coerced) elif pref == worst_pref: if isinstance(coerced, Decimal): worst = set_worst(worst, worst_decimal(coerced, worst)) elif isinstance(coerced, float): worst = set_worst(worst, max(coerced, worst)) else: # int, str if len(str(coerced)) > len(str(worst)): worst = set_worst(worst, coerced) return worst
python
def best_representative(d1, d2): """ Given two objects each coerced to the most specific type possible, return the one of the least restrictive type. >>> best_representative(Decimal('-37.5'), Decimal('0.9999')) Decimal('-99.9999') >>> best_representative(None, Decimal('6.1')) Decimal('6.1') >>> best_representative(311920, '48-49') '48-490' >>> best_representative(6, 'foo') 'foo' >>> best_representative(Decimal('4.95'), Decimal('6.1')) Decimal('9.99') >>> best_representative(Decimal('-1.9'), Decimal('6.1')) Decimal('-9.9') """ if hasattr(d2, 'strip') and not d2.strip(): return d1 if d1 is None: return d2 elif d2 is None: return d1 preference = (datetime.datetime, bool, int, Decimal, float, str) worst_pref = 0 worst = '' for coerced in (d1, d2): pref = preference.index(type(coerced)) if pref > worst_pref: worst_pref = pref worst = set_worst(worst, coerced) elif pref == worst_pref: if isinstance(coerced, Decimal): worst = set_worst(worst, worst_decimal(coerced, worst)) elif isinstance(coerced, float): worst = set_worst(worst, max(coerced, worst)) else: # int, str if len(str(coerced)) > len(str(worst)): worst = set_worst(worst, coerced) return worst
[ "def", "best_representative", "(", "d1", ",", "d2", ")", ":", "if", "hasattr", "(", "d2", ",", "'strip'", ")", "and", "not", "d2", ".", "strip", "(", ")", ":", "return", "d1", "if", "d1", "is", "None", ":", "return", "d2", "elif", "d2", "is", "No...
Given two objects each coerced to the most specific type possible, return the one of the least restrictive type. >>> best_representative(Decimal('-37.5'), Decimal('0.9999')) Decimal('-99.9999') >>> best_representative(None, Decimal('6.1')) Decimal('6.1') >>> best_representative(311920, '48-49') '48-490' >>> best_representative(6, 'foo') 'foo' >>> best_representative(Decimal('4.95'), Decimal('6.1')) Decimal('9.99') >>> best_representative(Decimal('-1.9'), Decimal('6.1')) Decimal('-9.9')
[ "Given", "two", "objects", "each", "coerced", "to", "the", "most", "specific", "type", "possible", "return", "the", "one", "of", "the", "least", "restrictive", "type", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L172-L213
train
50,472
catherinedevlin/ddl-generator
ddlgenerator/typehelpers.py
best_coercable
def best_coercable(data): """ Given an iterable of scalar data, returns the datum representing the most specific data type the list overall can be coerced into, preferring datetimes, then bools, then integers, then decimals, then floats, then strings. >>> best_coercable((6, '2', 9)) 6 >>> best_coercable((Decimal('6.1'), 2, 9)) Decimal('6.1') >>> best_coercable(('2014 jun 7', '2011 may 2')) datetime.datetime(2014, 6, 7, 0, 0) >>> best_coercable((7, 21.4, 'ruining everything')) 'ruining everything' """ preference = (datetime.datetime, bool, int, Decimal, float, str) worst_pref = 0 worst = '' for datum in data: coerced = coerce_to_specific(datum) pref = preference.index(type(coerced)) if pref > worst_pref: worst_pref = pref worst = coerced elif pref == worst_pref: if isinstance(coerced, Decimal): worst = worst_decimal(coerced, worst) elif isinstance(coerced, float): worst = max(coerced, worst) else: # int, str if len(str(coerced)) > len(str(worst)): worst = coerced return worst
python
def best_coercable(data): """ Given an iterable of scalar data, returns the datum representing the most specific data type the list overall can be coerced into, preferring datetimes, then bools, then integers, then decimals, then floats, then strings. >>> best_coercable((6, '2', 9)) 6 >>> best_coercable((Decimal('6.1'), 2, 9)) Decimal('6.1') >>> best_coercable(('2014 jun 7', '2011 may 2')) datetime.datetime(2014, 6, 7, 0, 0) >>> best_coercable((7, 21.4, 'ruining everything')) 'ruining everything' """ preference = (datetime.datetime, bool, int, Decimal, float, str) worst_pref = 0 worst = '' for datum in data: coerced = coerce_to_specific(datum) pref = preference.index(type(coerced)) if pref > worst_pref: worst_pref = pref worst = coerced elif pref == worst_pref: if isinstance(coerced, Decimal): worst = worst_decimal(coerced, worst) elif isinstance(coerced, float): worst = max(coerced, worst) else: # int, str if len(str(coerced)) > len(str(worst)): worst = coerced return worst
[ "def", "best_coercable", "(", "data", ")", ":", "preference", "=", "(", "datetime", ".", "datetime", ",", "bool", ",", "int", ",", "Decimal", ",", "float", ",", "str", ")", "worst_pref", "=", "0", "worst", "=", "''", "for", "datum", "in", "data", ":"...
Given an iterable of scalar data, returns the datum representing the most specific data type the list overall can be coerced into, preferring datetimes, then bools, then integers, then decimals, then floats, then strings. >>> best_coercable((6, '2', 9)) 6 >>> best_coercable((Decimal('6.1'), 2, 9)) Decimal('6.1') >>> best_coercable(('2014 jun 7', '2011 may 2')) datetime.datetime(2014, 6, 7, 0, 0) >>> best_coercable((7, 21.4, 'ruining everything')) 'ruining everything'
[ "Given", "an", "iterable", "of", "scalar", "data", "returns", "the", "datum", "representing", "the", "most", "specific", "data", "type", "the", "list", "overall", "can", "be", "coerced", "into", "preferring", "datetimes", "then", "bools", "then", "integers", "...
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L215-L247
train
50,473
catherinedevlin/ddl-generator
ddlgenerator/typehelpers.py
sqla_datatype_for
def sqla_datatype_for(datum): """ Given a scalar Python value, picks an appropriate SQLAlchemy data type. >>> sqla_datatype_for(7.2) DECIMAL(precision=2, scale=1) >>> sqla_datatype_for("Jan 17 2012") <class 'sqlalchemy.sql.sqltypes.DATETIME'> >>> sqla_datatype_for("something else") Unicode(length=14) """ try: if len(_complex_enough_to_be_date.findall(datum)) > 1: dateutil.parser.parse(datum) return sa.DATETIME except (TypeError, ValueError): pass try: (prec, scale) = precision_and_scale(datum) return sa.DECIMAL(prec, scale) except TypeError: return sa.Unicode(len(datum))
python
def sqla_datatype_for(datum): """ Given a scalar Python value, picks an appropriate SQLAlchemy data type. >>> sqla_datatype_for(7.2) DECIMAL(precision=2, scale=1) >>> sqla_datatype_for("Jan 17 2012") <class 'sqlalchemy.sql.sqltypes.DATETIME'> >>> sqla_datatype_for("something else") Unicode(length=14) """ try: if len(_complex_enough_to_be_date.findall(datum)) > 1: dateutil.parser.parse(datum) return sa.DATETIME except (TypeError, ValueError): pass try: (prec, scale) = precision_and_scale(datum) return sa.DECIMAL(prec, scale) except TypeError: return sa.Unicode(len(datum))
[ "def", "sqla_datatype_for", "(", "datum", ")", ":", "try", ":", "if", "len", "(", "_complex_enough_to_be_date", ".", "findall", "(", "datum", ")", ")", ">", "1", ":", "dateutil", ".", "parser", ".", "parse", "(", "datum", ")", "return", "sa", ".", "DAT...
Given a scalar Python value, picks an appropriate SQLAlchemy data type. >>> sqla_datatype_for(7.2) DECIMAL(precision=2, scale=1) >>> sqla_datatype_for("Jan 17 2012") <class 'sqlalchemy.sql.sqltypes.DATETIME'> >>> sqla_datatype_for("something else") Unicode(length=14)
[ "Given", "a", "scalar", "Python", "value", "picks", "an", "appropriate", "SQLAlchemy", "data", "type", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L249-L270
train
50,474
catherinedevlin/ddl-generator
ddlgenerator/console.py
generate
def generate(args=None, namespace=None, file=None): """ Genereate DDL from data sources named. :args: String or list of strings to be parsed for arguments :namespace: Namespace to extract arguments from :file: Write to this open file object (default stdout) """ if hasattr(args, 'split'): args = args.split() args = parser.parse_args(args, namespace) set_logging(args) logging.info(str(args)) if args.dialect in ('pg', 'pgsql', 'postgres'): args.dialect = 'postgresql' if args.dialect.startswith('dj'): args.dialect = 'django' elif args.dialect.startswith('sqla'): args.dialect = 'sqlalchemy' if args.dialect not in dialect_names: raise NotImplementedError('First arg must be one of: %s' % ", ".join(dialect_names)) if args.dialect == 'sqlalchemy': print(sqla_head, file=file) for datafile in args.datafile: if is_sqlalchemy_url.search(datafile): table_names_for_insert = [] for tbl in sqlalchemy_table_sources(datafile): t = generate_one(tbl, args, table_name=tbl.generator.name, file=file) if t.data: table_names_for_insert.append(tbl.generator.name) if args.inserts and args.dialect == 'sqlalchemy': print(sqla_inserter_call(table_names_for_insert), file=file) if t and args.inserts: for seq_update in emit_db_sequence_updates(t.source.db_engine): if args.dialect == 'sqlalchemy': print(' conn.execute("%s")' % seq_update, file=file) elif args.dialect == 'postgresql': print(seq_update, file=file) else: generate_one(datafile, args, file=file)
python
def generate(args=None, namespace=None, file=None): """ Genereate DDL from data sources named. :args: String or list of strings to be parsed for arguments :namespace: Namespace to extract arguments from :file: Write to this open file object (default stdout) """ if hasattr(args, 'split'): args = args.split() args = parser.parse_args(args, namespace) set_logging(args) logging.info(str(args)) if args.dialect in ('pg', 'pgsql', 'postgres'): args.dialect = 'postgresql' if args.dialect.startswith('dj'): args.dialect = 'django' elif args.dialect.startswith('sqla'): args.dialect = 'sqlalchemy' if args.dialect not in dialect_names: raise NotImplementedError('First arg must be one of: %s' % ", ".join(dialect_names)) if args.dialect == 'sqlalchemy': print(sqla_head, file=file) for datafile in args.datafile: if is_sqlalchemy_url.search(datafile): table_names_for_insert = [] for tbl in sqlalchemy_table_sources(datafile): t = generate_one(tbl, args, table_name=tbl.generator.name, file=file) if t.data: table_names_for_insert.append(tbl.generator.name) if args.inserts and args.dialect == 'sqlalchemy': print(sqla_inserter_call(table_names_for_insert), file=file) if t and args.inserts: for seq_update in emit_db_sequence_updates(t.source.db_engine): if args.dialect == 'sqlalchemy': print(' conn.execute("%s")' % seq_update, file=file) elif args.dialect == 'postgresql': print(seq_update, file=file) else: generate_one(datafile, args, file=file)
[ "def", "generate", "(", "args", "=", "None", ",", "namespace", "=", "None", ",", "file", "=", "None", ")", ":", "if", "hasattr", "(", "args", ",", "'split'", ")", ":", "args", "=", "args", ".", "split", "(", ")", "args", "=", "parser", ".", "pars...
Genereate DDL from data sources named. :args: String or list of strings to be parsed for arguments :namespace: Namespace to extract arguments from :file: Write to this open file object (default stdout)
[ "Genereate", "DDL", "from", "data", "sources", "named", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L72-L112
train
50,475
catherinedevlin/ddl-generator
ddlgenerator/ddlgenerator.py
Table.ddl
def ddl(self, dialect=None, creates=True, drops=True): """ Returns SQL to define the table. """ dialect = self._dialect(dialect) creator = CreateTable(self.table).compile(mock_engines[dialect]) creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" % (col, self.comments[col])) for col in self.comments) result = [] if drops: result.append(self._dropper(dialect) + ';') if creates: result.append("%s;\n%s" % (creator, comments)) for child in self.children.values(): result.append(child.ddl(dialect=dialect, creates=creates, drops=drops)) return '\n\n'.join(result)
python
def ddl(self, dialect=None, creates=True, drops=True): """ Returns SQL to define the table. """ dialect = self._dialect(dialect) creator = CreateTable(self.table).compile(mock_engines[dialect]) creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" % (col, self.comments[col])) for col in self.comments) result = [] if drops: result.append(self._dropper(dialect) + ';') if creates: result.append("%s;\n%s" % (creator, comments)) for child in self.children.values(): result.append(child.ddl(dialect=dialect, creates=creates, drops=drops)) return '\n\n'.join(result)
[ "def", "ddl", "(", "self", ",", "dialect", "=", "None", ",", "creates", "=", "True", ",", "drops", "=", "True", ")", ":", "dialect", "=", "self", ".", "_dialect", "(", "dialect", ")", "creator", "=", "CreateTable", "(", "self", ".", "table", ")", "...
Returns SQL to define the table.
[ "Returns", "SQL", "to", "define", "the", "table", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L263-L281
train
50,476
catherinedevlin/ddl-generator
ddlgenerator/ddlgenerator.py
Table.sqlalchemy
def sqlalchemy(self, is_top=True): """Dumps Python code to set up the table's SQLAlchemy model""" table_def = self.table_backref_remover.sub('', self.table.__repr__()) # inject UNIQUE constraints into table definition constraint_defs = [] for constraint in self.table.constraints: if isinstance(constraint, sa.sql.schema.UniqueConstraint): col_list = ', '.join("'%s'" % c.name for c in constraint.columns) constraint_defs.append('UniqueConstraint(%s)' % col_list) if constraint_defs: constraint_defs = ',\n '.join(constraint_defs) + ',' table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None') table_def = table_def.replace("MetaData(bind=None)", "metadata") table_def = table_def.replace("Column(", "\n Column(") table_def = table_def.replace("schema=", "\n schema=") result = [table_def, ] result.extend(c.sqlalchemy(is_top=False) for c in self.children.values()) result = "\n%s = %s" % (self.table_name, "\n".join(result)) if is_top: sqla_imports = set(self.capitalized_words.findall(table_def)) sqla_imports &= set(dir(sa)) sqla_imports = sorted(sqla_imports) result = self.sqlalchemy_setup_template % ( ", ".join(sqla_imports), result, self.table.name) result = textwrap.dedent(result) return result
python
def sqlalchemy(self, is_top=True): """Dumps Python code to set up the table's SQLAlchemy model""" table_def = self.table_backref_remover.sub('', self.table.__repr__()) # inject UNIQUE constraints into table definition constraint_defs = [] for constraint in self.table.constraints: if isinstance(constraint, sa.sql.schema.UniqueConstraint): col_list = ', '.join("'%s'" % c.name for c in constraint.columns) constraint_defs.append('UniqueConstraint(%s)' % col_list) if constraint_defs: constraint_defs = ',\n '.join(constraint_defs) + ',' table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None') table_def = table_def.replace("MetaData(bind=None)", "metadata") table_def = table_def.replace("Column(", "\n Column(") table_def = table_def.replace("schema=", "\n schema=") result = [table_def, ] result.extend(c.sqlalchemy(is_top=False) for c in self.children.values()) result = "\n%s = %s" % (self.table_name, "\n".join(result)) if is_top: sqla_imports = set(self.capitalized_words.findall(table_def)) sqla_imports &= set(dir(sa)) sqla_imports = sorted(sqla_imports) result = self.sqlalchemy_setup_template % ( ", ".join(sqla_imports), result, self.table.name) result = textwrap.dedent(result) return result
[ "def", "sqlalchemy", "(", "self", ",", "is_top", "=", "True", ")", ":", "table_def", "=", "self", ".", "table_backref_remover", ".", "sub", "(", "''", ",", "self", ".", "table", ".", "__repr__", "(", ")", ")", "# inject UNIQUE constraints into table definition...
Dumps Python code to set up the table's SQLAlchemy model
[ "Dumps", "Python", "code", "to", "set", "up", "the", "table", "s", "SQLAlchemy", "model" ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L292-L320
train
50,477
catherinedevlin/ddl-generator
ddlgenerator/ddlgenerator.py
Table._prep_datum
def _prep_datum(self, datum, dialect, col, needs_conversion): """Puts a value in proper format for a SQL string""" if datum is None or (needs_conversion and not str(datum).strip()): return 'NULL' pytype = self.columns[col]['pytype'] if needs_conversion: if pytype == datetime.datetime: datum = dateutil.parser.parse(datum) elif pytype == bool: datum = th.coerce_to_specific(datum) if dialect.startswith('sqlite'): datum = 1 if datum else 0 else: datum = pytype(str(datum)) if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date): if dialect in self._datetime_format: return datum.strftime(self._datetime_format[dialect]) else: return "'%s'" % datum elif hasattr(datum, 'lower'): # simple SQL injection protection, sort of... ? return "'%s'" % datum.replace("'", "''") else: return datum
python
def _prep_datum(self, datum, dialect, col, needs_conversion): """Puts a value in proper format for a SQL string""" if datum is None or (needs_conversion and not str(datum).strip()): return 'NULL' pytype = self.columns[col]['pytype'] if needs_conversion: if pytype == datetime.datetime: datum = dateutil.parser.parse(datum) elif pytype == bool: datum = th.coerce_to_specific(datum) if dialect.startswith('sqlite'): datum = 1 if datum else 0 else: datum = pytype(str(datum)) if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date): if dialect in self._datetime_format: return datum.strftime(self._datetime_format[dialect]) else: return "'%s'" % datum elif hasattr(datum, 'lower'): # simple SQL injection protection, sort of... ? return "'%s'" % datum.replace("'", "''") else: return datum
[ "def", "_prep_datum", "(", "self", ",", "datum", ",", "dialect", ",", "col", ",", "needs_conversion", ")", ":", "if", "datum", "is", "None", "or", "(", "needs_conversion", "and", "not", "str", "(", "datum", ")", ".", "strip", "(", ")", ")", ":", "ret...
Puts a value in proper format for a SQL string
[ "Puts", "a", "value", "in", "proper", "format", "for", "a", "SQL", "string" ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L360-L385
train
50,478
catherinedevlin/ddl-generator
ddlgenerator/reshape.py
_id_fieldname
def _id_fieldname(fieldnames, table_name = ''): """ Finds the field name from a dict likeliest to be its unique ID >>> _id_fieldname({'bar': True, 'id': 1}, 'foo') 'id' >>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo') 'foo_id' >>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo') """ templates = ['%s_%%s' % table_name, '%s', '_%s'] for stub in ['id', 'num', 'no', 'number']: for t in templates: if t % stub in fieldnames: return t % stub
python
def _id_fieldname(fieldnames, table_name = ''): """ Finds the field name from a dict likeliest to be its unique ID >>> _id_fieldname({'bar': True, 'id': 1}, 'foo') 'id' >>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo') 'foo_id' >>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo') """ templates = ['%s_%%s' % table_name, '%s', '_%s'] for stub in ['id', 'num', 'no', 'number']: for t in templates: if t % stub in fieldnames: return t % stub
[ "def", "_id_fieldname", "(", "fieldnames", ",", "table_name", "=", "''", ")", ":", "templates", "=", "[", "'%s_%%s'", "%", "table_name", ",", "'%s'", ",", "'_%s'", "]", "for", "stub", "in", "[", "'id'", ",", "'num'", ",", "'no'", ",", "'number'", "]", ...
Finds the field name from a dict likeliest to be its unique ID >>> _id_fieldname({'bar': True, 'id': 1}, 'foo') 'id' >>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo') 'foo_id' >>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
[ "Finds", "the", "field", "name", "from", "a", "dict", "likeliest", "to", "be", "its", "unique", "ID" ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L69-L83
train
50,479
catherinedevlin/ddl-generator
ddlgenerator/reshape.py
unnest_child_dict
def unnest_child_dict(parent, key, parent_name=''): """ If ``parent`` dictionary has a ``key`` whose ``val`` is a dict, unnest ``val``'s fields into ``parent`` and remove ``key``. >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_id': 1, 'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital': 'Québec City', 'province': 'Québec'} """ val = parent[key] name = "%s['%s']" % (parent_name, key) logging.debug("Unnesting dict %s" % name) id = _id_fieldname(val, parent_name) if id: logging.debug("%s is %s's ID" % (id, key)) if len(val) <= 2: logging.debug('Removing ID column %s.%s' % (key, id)) val.pop(id) if len(val) == 0: logging.debug('%s is empty, removing from %s' % (name, parent_name)) parent.pop(key) return elif len(val) == 1: logging.debug('Nested one-item dict in %s, making scalar.' % name) parent[key] = list(val.values())[0] return else: logging.debug('Pushing all fields from %s up to %s' % (name, parent_name)) new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val] overlap = (set(new_field_names) & set(parent)) - set(id or []) if overlap: logging.error("Could not unnest child %s; %s present in %s" % (name, key, ','.join(overlap), parent_name)) return for (child_key, child_val) in val.items(): new_field_name = '%s_%s' % (key, child_key.strip('_')) parent[new_field_name] = child_val parent.pop(key)
python
def unnest_child_dict(parent, key, parent_name=''): """ If ``parent`` dictionary has a ``key`` whose ``val`` is a dict, unnest ``val``'s fields into ``parent`` and remove ``key``. >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_id': 1, 'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital': 'Québec City', 'province': 'Québec'} """ val = parent[key] name = "%s['%s']" % (parent_name, key) logging.debug("Unnesting dict %s" % name) id = _id_fieldname(val, parent_name) if id: logging.debug("%s is %s's ID" % (id, key)) if len(val) <= 2: logging.debug('Removing ID column %s.%s' % (key, id)) val.pop(id) if len(val) == 0: logging.debug('%s is empty, removing from %s' % (name, parent_name)) parent.pop(key) return elif len(val) == 1: logging.debug('Nested one-item dict in %s, making scalar.' % name) parent[key] = list(val.values())[0] return else: logging.debug('Pushing all fields from %s up to %s' % (name, parent_name)) new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val] overlap = (set(new_field_names) & set(parent)) - set(id or []) if overlap: logging.error("Could not unnest child %s; %s present in %s" % (name, key, ','.join(overlap), parent_name)) return for (child_key, child_val) in val.items(): new_field_name = '%s_%s' % (key, child_key.strip('_')) parent[new_field_name] = child_val parent.pop(key)
[ "def", "unnest_child_dict", "(", "parent", ",", "key", ",", "parent_name", "=", "''", ")", ":", "val", "=", "parent", "[", "key", "]", "name", "=", "\"%s['%s']\"", "%", "(", "parent_name", ",", "key", ")", "logging", ".", "debug", "(", "\"Unnesting dict ...
If ``parent`` dictionary has a ``key`` whose ``val`` is a dict, unnest ``val``'s fields into ``parent`` and remove ``key``. >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital_id': 1, 'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'} >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}} >>> unnest_child_dict(parent, 'capital', 'provinces') >>> pprint(parent) {'capital': 'Québec City', 'province': 'Québec'}
[ "If", "parent", "dictionary", "has", "a", "key", "whose", "val", "is", "a", "dict", "unnest", "val", "s", "fields", "into", "parent", "and", "remove", "key", "." ]
db6741216d1e9ad84b07d4ad281bfff021d344ea
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L113-L165
train
50,480
skelsec/minikerberos
minikerberos/ccache.py
Header.parse
def parse(data): """ returns a list of header tags """ reader = io.BytesIO(data) headers = [] while reader.tell() < len(data): h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers
python
def parse(data): """ returns a list of header tags """ reader = io.BytesIO(data) headers = [] while reader.tell() < len(data): h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers
[ "def", "parse", "(", "data", ")", ":", "reader", "=", "io", ".", "BytesIO", "(", "data", ")", "headers", "=", "[", "]", "while", "reader", ".", "tell", "(", ")", "<", "len", "(", "data", ")", ":", "h", "=", "Header", "(", ")", "h", ".", "tag"...
returns a list of header tags
[ "returns", "a", "list", "of", "header", "tags" ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L29-L41
train
50,481
skelsec/minikerberos
minikerberos/ccache.py
Credential.to_tgt
def to_tgt(self): """ Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format """ enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t
python
def to_tgt(self): """ Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format """ enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t
[ "def", "to_tgt", "(", "self", ")", ":", "enc_part", "=", "EncryptedData", "(", "{", "'etype'", ":", "1", ",", "'cipher'", ":", "b''", "}", ")", "tgt_rep", "=", "{", "}", "tgt_rep", "[", "'pvno'", "]", "=", "krb5_pvno", "tgt_rep", "[", "'msg-type'", "...
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
[ "Returns", "the", "native", "format", "of", "an", "AS_REP", "message", "and", "the", "sessionkey", "in", "EncryptionKey", "native", "format" ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L102-L118
train
50,482
skelsec/minikerberos
minikerberos/ccache.py
CCACHE.from_kirbidir
def from_kirbidir(directory_path): """ Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object """ cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc
python
def from_kirbidir(directory_path): """ Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object """ cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc
[ "def", "from_kirbidir", "(", "directory_path", ")", ":", "cc", "=", "CCACHE", "(", ")", "dir_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "directory_path", ")", ",", "'*.kirbi'", ")", "for", "filename", "in",...
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
[ "Iterates", "trough", "all", ".", "kirbi", "files", "in", "a", "given", "directory", "and", "converts", "all", "of", "them", "into", "one", "CCACHE", "object" ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L638-L650
train
50,483
skelsec/minikerberos
minikerberos/ccache.py
CCACHE.to_file
def to_file(self, filename): """ Writes the contents of the CCACHE object to a file """ with open(filename, 'wb') as f: f.write(self.to_bytes())
python
def to_file(self, filename): """ Writes the contents of the CCACHE object to a file """ with open(filename, 'wb') as f: f.write(self.to_bytes())
[ "def", "to_file", "(", "self", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "to_bytes", "(", ")", ")" ]
Writes the contents of the CCACHE object to a file
[ "Writes", "the", "contents", "of", "the", "CCACHE", "object", "to", "a", "file" ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L674-L679
train
50,484
skelsec/minikerberos
minikerberos/common.py
print_table
def print_table(lines, separate_head=True): """Prints a formatted table given a 2 dimensional array""" #Count the column width widths = [] for line in lines: for i,size in enumerate([len(x) for x in line]): while i >= len(widths): widths.append(0) if size > widths[i]: widths[i] = size #Generate the format string to pad the columns print_string = "" for i,width in enumerate(widths): print_string += "{" + str(i) + ":" + str(width) + "} | " if (len(print_string) == 0): return print_string = print_string[:-3] #Print the actual data for i,line in enumerate(lines): print(print_string.format(*line)) if (i == 0 and separate_head): print("-"*(sum(widths)+3*(len(widths)-1)))
python
def print_table(lines, separate_head=True): """Prints a formatted table given a 2 dimensional array""" #Count the column width widths = [] for line in lines: for i,size in enumerate([len(x) for x in line]): while i >= len(widths): widths.append(0) if size > widths[i]: widths[i] = size #Generate the format string to pad the columns print_string = "" for i,width in enumerate(widths): print_string += "{" + str(i) + ":" + str(width) + "} | " if (len(print_string) == 0): return print_string = print_string[:-3] #Print the actual data for i,line in enumerate(lines): print(print_string.format(*line)) if (i == 0 and separate_head): print("-"*(sum(widths)+3*(len(widths)-1)))
[ "def", "print_table", "(", "lines", ",", "separate_head", "=", "True", ")", ":", "#Count the column width", "widths", "=", "[", "]", "for", "line", "in", "lines", ":", "for", "i", ",", "size", "in", "enumerate", "(", "[", "len", "(", "x", ")", "for", ...
Prints a formatted table given a 2 dimensional array
[ "Prints", "a", "formatted", "table", "given", "a", "2", "dimensional", "array" ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L246-L269
train
50,485
skelsec/minikerberos
minikerberos/common.py
KerberosCredential.get_key_for_enctype
def get_key_for_enctype(self, etype): """ Returns the encryption key bytes for the enctryption type. """ if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96: if self.kerberos_key_aes_256: return bytes.fromhex(self.kerberos_key_aes_256) if self.password is not None: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.AES256, self.password.encode(), salt).contents raise Exception('There is no key for AES256 encryption') elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96: if self.kerberos_key_aes_128: return bytes.fromhex(self.kerberos_key_aes_128) if self.password is not None: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.AES128, self.password.encode(), salt).contents raise Exception('There is no key for AES128 encryption') elif etype == EncryptionType.ARCFOUR_HMAC_MD5: if self.kerberos_key_rc4: return bytes.fromhex(self.kerberos_key_rc4) if self.nt_hash: return bytes.fromhex(self.nt_hash) elif self.password: self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper() return bytes.fromhex(self.nt_hash) else: raise Exception('There is no key for RC4 encryption') elif etype == EncryptionType.DES3_CBC_SHA1: if self.kerberos_key_des3: return bytes.fromhex(self.kerberos_key_des) elif self.password: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.DES3, self.password.encode(), salt).contents else: raise Exception('There is no key for DES3 encryption') elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or if self.kerberos_key_des: return bytes.fromhex(self.kerberos_key_des) elif self.password: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents else: raise Exception('There is no key for DES3 encryption') else: raise Exception('Unsupported encryption type: %s' % etype.name)
python
def get_key_for_enctype(self, etype): """ Returns the encryption key bytes for the enctryption type. """ if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96: if self.kerberos_key_aes_256: return bytes.fromhex(self.kerberos_key_aes_256) if self.password is not None: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.AES256, self.password.encode(), salt).contents raise Exception('There is no key for AES256 encryption') elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96: if self.kerberos_key_aes_128: return bytes.fromhex(self.kerberos_key_aes_128) if self.password is not None: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.AES128, self.password.encode(), salt).contents raise Exception('There is no key for AES128 encryption') elif etype == EncryptionType.ARCFOUR_HMAC_MD5: if self.kerberos_key_rc4: return bytes.fromhex(self.kerberos_key_rc4) if self.nt_hash: return bytes.fromhex(self.nt_hash) elif self.password: self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper() return bytes.fromhex(self.nt_hash) else: raise Exception('There is no key for RC4 encryption') elif etype == EncryptionType.DES3_CBC_SHA1: if self.kerberos_key_des3: return bytes.fromhex(self.kerberos_key_des) elif self.password: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.DES3, self.password.encode(), salt).contents else: raise Exception('There is no key for DES3 encryption') elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or if self.kerberos_key_des: return bytes.fromhex(self.kerberos_key_des) elif self.password: salt = (self.domain.upper() + self.username).encode() return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents else: raise Exception('There is no key for DES3 encryption') else: raise Exception('Unsupported encryption type: %s' % etype.name)
[ "def", "get_key_for_enctype", "(", "self", ",", "etype", ")", ":", "if", "etype", "==", "EncryptionType", ".", "AES256_CTS_HMAC_SHA1_96", ":", "if", "self", ".", "kerberos_key_aes_256", ":", "return", "bytes", ".", "fromhex", "(", "self", ".", "kerberos_key_aes_...
Returns the encryption key bytes for the enctryption type.
[ "Returns", "the", "encryption", "key", "bytes", "for", "the", "enctryption", "type", "." ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L54-L101
train
50,486
skelsec/minikerberos
minikerberos/security.py
KerberosUserEnum.run
def run(self, realm, users): """ Requests a TGT in the name of the users specified in users. Returns a list of usernames that are in the domain. realm: kerberos realm (domain name of the corp) users: list : list of usernames to test """ existing_users = [] for user in users: logging.debug('Probing user %s' % user) req = KerberosUserEnum.construct_tgt_req(realm, user) rep = self.ksoc.sendrecv(req.dump(), throw = False) if rep.name != 'KRB_ERROR': # user doesnt need preauth, but it exists existing_users.append(user) elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value: # any other error means user doesnt exist continue else: # preauth needed, only if user exists existing_users.append(user) return existing_users
python
def run(self, realm, users): """ Requests a TGT in the name of the users specified in users. Returns a list of usernames that are in the domain. realm: kerberos realm (domain name of the corp) users: list : list of usernames to test """ existing_users = [] for user in users: logging.debug('Probing user %s' % user) req = KerberosUserEnum.construct_tgt_req(realm, user) rep = self.ksoc.sendrecv(req.dump(), throw = False) if rep.name != 'KRB_ERROR': # user doesnt need preauth, but it exists existing_users.append(user) elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value: # any other error means user doesnt exist continue else: # preauth needed, only if user exists existing_users.append(user) return existing_users
[ "def", "run", "(", "self", ",", "realm", ",", "users", ")", ":", "existing_users", "=", "[", "]", "for", "user", "in", "users", ":", "logging", ".", "debug", "(", "'Probing user %s'", "%", "user", ")", "req", "=", "KerberosUserEnum", ".", "construct_tgt_...
Requests a TGT in the name of the users specified in users. Returns a list of usernames that are in the domain. realm: kerberos realm (domain name of the corp) users: list : list of usernames to test
[ "Requests", "a", "TGT", "in", "the", "name", "of", "the", "users", "specified", "in", "users", ".", "Returns", "a", "list", "of", "usernames", "that", "are", "in", "the", "domain", "." ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L43-L69
train
50,487
skelsec/minikerberos
minikerberos/communication.py
KerbrosComm.from_tgt
def from_tgt(ksoc, tgt, key): """ Sets up the kerberos object from tgt and the session key. Use this function when pulling the TGT from ccache file. """ kc = KerbrosComm(None, ksoc) kc.kerberos_TGT = tgt kc.kerberos_cipher_type = key['keytype'] kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue']) kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type] return kc
python
def from_tgt(ksoc, tgt, key): """ Sets up the kerberos object from tgt and the session key. Use this function when pulling the TGT from ccache file. """ kc = KerbrosComm(None, ksoc) kc.kerberos_TGT = tgt kc.kerberos_cipher_type = key['keytype'] kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue']) kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type] return kc
[ "def", "from_tgt", "(", "ksoc", ",", "tgt", ",", "key", ")", ":", "kc", "=", "KerbrosComm", "(", "None", ",", "ksoc", ")", "kc", ".", "kerberos_TGT", "=", "tgt", "kc", ".", "kerberos_cipher_type", "=", "key", "[", "'keytype'", "]", "kc", ".", "kerber...
Sets up the kerberos object from tgt and the session key. Use this function when pulling the TGT from ccache file.
[ "Sets", "up", "the", "kerberos", "object", "from", "tgt", "and", "the", "session", "key", ".", "Use", "this", "function", "when", "pulling", "the", "TGT", "from", "ccache", "file", "." ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L138-L149
train
50,488
skelsec/minikerberos
minikerberos/communication.py
KerbrosComm.get_TGS
def get_TGS(self, spn_user, override_etype = None): """ Requests a TGS ticket for the specified user. Retruns the TGS ticket, end the decrpyted encTGSRepPart. spn_user: KerberosTarget: the service user you want to get TGS for. override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to recieve whatever tgs tiecket """ #construct tgs_req logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname()) now = datetime.datetime.utcnow() kdc_req_body = {} kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize'])) kdc_req_body['realm'] = spn_user.domain.upper() kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()}) kdc_req_body['till'] = now + datetime.timedelta(days=1) kdc_req_body['nonce'] = secrets.randbits(31) if override_etype: kdc_req_body['etype'] = override_etype else: kdc_req_body['etype'] = [self.kerberos_cipher_type] authenticator_data = {} authenticator_data['authenticator-vno'] = krb5_pvno authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm']) authenticator_data['cname'] = self.kerberos_TGT['cname'] authenticator_data['cusec'] = now.microsecond authenticator_data['ctime'] = now authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None) ap_req = {} ap_req['pvno'] = krb5_pvno ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value ap_req['ap-options'] = APOptions(set()) ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket']) ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc}) pa_data_1 = {} pa_data_1['padata-type'] = PaDataType.TGS_REQ.value pa_data_1['padata-value'] = AP_REQ(ap_req).dump() kdc_req = {} kdc_req['pvno'] = krb5_pvno kdc_req['msg-type'] = 
MESSAGE_TYPE.KRB_TGS_REQ.value kdc_req['padata'] = [pa_data_1] kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body) req = TGS_REQ(kdc_req) logger.debug('Constructing TGS request to server') rep = self.ksoc.sendrecv(req.dump()) logger.debug('Got TGS reply, decrypting...') tgs = rep.native encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue']) self.ccache.add_tgs(tgs, encTGSRepPart) logger.debug('Got valid TGS reply') self.kerberos_TGS = tgs return tgs, encTGSRepPart, key
python
def get_TGS(self, spn_user, override_etype = None): """ Requests a TGS ticket for the specified user. Retruns the TGS ticket, end the decrpyted encTGSRepPart. spn_user: KerberosTarget: the service user you want to get TGS for. override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to recieve whatever tgs tiecket """ #construct tgs_req logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname()) now = datetime.datetime.utcnow() kdc_req_body = {} kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize'])) kdc_req_body['realm'] = spn_user.domain.upper() kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()}) kdc_req_body['till'] = now + datetime.timedelta(days=1) kdc_req_body['nonce'] = secrets.randbits(31) if override_etype: kdc_req_body['etype'] = override_etype else: kdc_req_body['etype'] = [self.kerberos_cipher_type] authenticator_data = {} authenticator_data['authenticator-vno'] = krb5_pvno authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm']) authenticator_data['cname'] = self.kerberos_TGT['cname'] authenticator_data['cusec'] = now.microsecond authenticator_data['ctime'] = now authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None) ap_req = {} ap_req['pvno'] = krb5_pvno ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value ap_req['ap-options'] = APOptions(set()) ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket']) ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc}) pa_data_1 = {} pa_data_1['padata-type'] = PaDataType.TGS_REQ.value pa_data_1['padata-value'] = AP_REQ(ap_req).dump() kdc_req = {} kdc_req['pvno'] = krb5_pvno kdc_req['msg-type'] = 
MESSAGE_TYPE.KRB_TGS_REQ.value kdc_req['padata'] = [pa_data_1] kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body) req = TGS_REQ(kdc_req) logger.debug('Constructing TGS request to server') rep = self.ksoc.sendrecv(req.dump()) logger.debug('Got TGS reply, decrypting...') tgs = rep.native encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue']) self.ccache.add_tgs(tgs, encTGSRepPart) logger.debug('Got valid TGS reply') self.kerberos_TGS = tgs return tgs, encTGSRepPart, key
[ "def", "get_TGS", "(", "self", ",", "spn_user", ",", "override_etype", "=", "None", ")", ":", "#construct tgs_req", "logger", ".", "debug", "(", "'Constructing TGS request for user %s'", "%", "spn_user", ".", "get_formatted_pname", "(", ")", ")", "now", "=", "da...
Requests a TGS ticket for the specified user. Retruns the TGS ticket, end the decrpyted encTGSRepPart. spn_user: KerberosTarget: the service user you want to get TGS for. override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to recieve whatever tgs tiecket
[ "Requests", "a", "TGS", "ticket", "for", "the", "specified", "user", ".", "Retruns", "the", "TGS", "ticket", "end", "the", "decrpyted", "encTGSRepPart", "." ]
caf14c1d0132119d6e8a8f05120efb7d0824b2c6
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L287-L348
train
50,489
project-rig/rig
rig/routing_table/remove_default_routes.py
minimise
def minimise(table, target_length, check_for_aliases=True): """Remove from the routing table any entries which could be replaced by default routing. Parameters ---------- routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Routing table from which to remove entries which could be handled by default routing. target_length : int or None Target length of the routing table. check_for_aliases : bool If True (the default), default-route candidates are checked for aliased entries before suggesting a route may be default routed. This check is required to ensure correctness in the general case but has a runtime complexity of O(N^2) in the worst case for N-entry tables. If False, the alias-check is skipped resulting in O(N) runtime. This option should only be used if the supplied table is guaranteed not to contain any aliased entries. Raises ------ MinimisationFailedError If the smallest table that can be produced is larger than `target_length`. Returns ------- [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Reduced routing table entries. """ # If alias checking is required, see if we can cheaply prove that no # aliases exist in the table to skip this costly check. if check_for_aliases: # Aliases cannot exist when all entries share the same mask and all # keys are unique. if len(set(e.mask for e in table)) == 1 and \ len(table) == len(set(e.key for e in table)): check_for_aliases = False # Generate a new table with default-route entries removed new_table = list() for i, entry in enumerate(table): if not _is_defaultable(i, entry, table, check_for_aliases): # If the entry cannot be removed then add it to the table new_table.append(entry) # If the resultant table is larger than the target raise an exception if target_length is not None and target_length < len(new_table): raise MinimisationFailedError(target_length, len(new_table)) return new_table
python
def minimise(table, target_length, check_for_aliases=True): """Remove from the routing table any entries which could be replaced by default routing. Parameters ---------- routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Routing table from which to remove entries which could be handled by default routing. target_length : int or None Target length of the routing table. check_for_aliases : bool If True (the default), default-route candidates are checked for aliased entries before suggesting a route may be default routed. This check is required to ensure correctness in the general case but has a runtime complexity of O(N^2) in the worst case for N-entry tables. If False, the alias-check is skipped resulting in O(N) runtime. This option should only be used if the supplied table is guaranteed not to contain any aliased entries. Raises ------ MinimisationFailedError If the smallest table that can be produced is larger than `target_length`. Returns ------- [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Reduced routing table entries. """ # If alias checking is required, see if we can cheaply prove that no # aliases exist in the table to skip this costly check. if check_for_aliases: # Aliases cannot exist when all entries share the same mask and all # keys are unique. if len(set(e.mask for e in table)) == 1 and \ len(table) == len(set(e.key for e in table)): check_for_aliases = False # Generate a new table with default-route entries removed new_table = list() for i, entry in enumerate(table): if not _is_defaultable(i, entry, table, check_for_aliases): # If the entry cannot be removed then add it to the table new_table.append(entry) # If the resultant table is larger than the target raise an exception if target_length is not None and target_length < len(new_table): raise MinimisationFailedError(target_length, len(new_table)) return new_table
[ "def", "minimise", "(", "table", ",", "target_length", ",", "check_for_aliases", "=", "True", ")", ":", "# If alias checking is required, see if we can cheaply prove that no", "# aliases exist in the table to skip this costly check.", "if", "check_for_aliases", ":", "# Aliases cann...
Remove from the routing table any entries which could be replaced by default routing. Parameters ---------- routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Routing table from which to remove entries which could be handled by default routing. target_length : int or None Target length of the routing table. check_for_aliases : bool If True (the default), default-route candidates are checked for aliased entries before suggesting a route may be default routed. This check is required to ensure correctness in the general case but has a runtime complexity of O(N^2) in the worst case for N-entry tables. If False, the alias-check is skipped resulting in O(N) runtime. This option should only be used if the supplied table is guaranteed not to contain any aliased entries. Raises ------ MinimisationFailedError If the smallest table that can be produced is larger than `target_length`. Returns ------- [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Reduced routing table entries.
[ "Remove", "from", "the", "routing", "table", "any", "entries", "which", "could", "be", "replaced", "by", "default", "routing", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/remove_default_routes.py#L5-L57
train
50,490
project-rig/rig
rig/routing_table/remove_default_routes.py
_is_defaultable
def _is_defaultable(i, entry, table, check_for_aliases=True): """Determine if an entry may be removed from a routing table and be replaced by a default route. Parameters ---------- i : int Position of the entry in the table entry : RoutingTableEntry The entry itself table : [RoutingTableEntry, ...] The table containing the entry. check_for_aliases : bool If True, the table is checked for aliased entries before suggesting a route may be default routed. """ # May only have one source and sink (which may not be None) if (len(entry.sources) == 1 and len(entry.route) == 1 and None not in entry.sources): # Neither the source nor sink may be a core source = next(iter(entry.sources)) sink = next(iter(entry.route)) if source.is_link and sink.is_link: # The source must be going in the same direction as the link if source.opposite is sink: # And the entry must not be aliased key, mask = entry.key, entry.mask if not check_for_aliases or \ not any(intersect(key, mask, d.key, d.mask) for d in table[i+1:]): return True return False
python
def _is_defaultable(i, entry, table, check_for_aliases=True): """Determine if an entry may be removed from a routing table and be replaced by a default route. Parameters ---------- i : int Position of the entry in the table entry : RoutingTableEntry The entry itself table : [RoutingTableEntry, ...] The table containing the entry. check_for_aliases : bool If True, the table is checked for aliased entries before suggesting a route may be default routed. """ # May only have one source and sink (which may not be None) if (len(entry.sources) == 1 and len(entry.route) == 1 and None not in entry.sources): # Neither the source nor sink may be a core source = next(iter(entry.sources)) sink = next(iter(entry.route)) if source.is_link and sink.is_link: # The source must be going in the same direction as the link if source.opposite is sink: # And the entry must not be aliased key, mask = entry.key, entry.mask if not check_for_aliases or \ not any(intersect(key, mask, d.key, d.mask) for d in table[i+1:]): return True return False
[ "def", "_is_defaultable", "(", "i", ",", "entry", ",", "table", ",", "check_for_aliases", "=", "True", ")", ":", "# May only have one source and sink (which may not be None)", "if", "(", "len", "(", "entry", ".", "sources", ")", "==", "1", "and", "len", "(", "...
Determine if an entry may be removed from a routing table and be replaced by a default route. Parameters ---------- i : int Position of the entry in the table entry : RoutingTableEntry The entry itself table : [RoutingTableEntry, ...] The table containing the entry. check_for_aliases : bool If True, the table is checked for aliased entries before suggesting a route may be default routed.
[ "Determine", "if", "an", "entry", "may", "be", "removed", "from", "a", "routing", "table", "and", "be", "replaced", "by", "a", "default", "route", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/remove_default_routes.py#L60-L93
train
50,491
project-rig/rig
rig/routing_table/utils.py
table_is_subset_of
def table_is_subset_of(entries_a, entries_b): """Check that every key matched by every entry in one table results in the same route when checked against the other table. For example, the table:: >>> from rig.routing_table import Routes >>> table = [ ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf), ... RoutingTableEntry({Routes.east}, 0x1, 0xf), ... RoutingTableEntry({Routes.south_west}, 0x5, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf), ... RoutingTableEntry({Routes.east}, 0x9, 0xf), ... RoutingTableEntry({Routes.south_west}, 0xe, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf), ... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb), ... ] is a functional subset of a minimised version of itself:: >>> from rig.routing_table.ordered_covering import minimise >>> other_table = minimise(table, target_length=None) >>> other_table == table False >>> table_is_subset_of(table, other_table) True But not vice-versa:: >>> table_is_subset_of(other_table, table) False Default routes are taken into account, such that the table:: >>> table = [ ... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}), ... ] is a subset of the empty table:: >>> table_is_subset_of(table, list()) True Parameters ---------- entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Ordered of lists of routing table entries to compare. Returns ------- bool True if every key matched in `entries_a` would result in an equivalent route for the packet when matched in `entries_b`. 
""" # Determine which bits we don't need to explicitly test for common_xs = get_common_xs(entries_b) # For every entry in the first table for entry in expand_entries(entries_a, ignore_xs=common_xs): # Look at every entry in the second table for other_entry in entries_b: # If the first entry matches the second if other_entry.mask & entry.key == other_entry.key: if other_entry.route == entry.route: # If the route is the same then we move on to the next # entry in the first table. break else: # Otherwise we return false as the tables are different return False else: # If we didn't break out of the loop then the entry from the first # table never matched an entry in the second table. If the entry # from the first table could not be default routed we return False # as the tables cannot be equivalent. default_routed = False if len(entry.route) == 1 and len(entry.sources) == 1: source = next(iter(entry.sources)) sink = next(iter(entry.route)) if (source is not None and sink.is_link and source is sink.opposite): default_routed = True if not default_routed: return False return True
python
def table_is_subset_of(entries_a, entries_b): """Check that every key matched by every entry in one table results in the same route when checked against the other table. For example, the table:: >>> from rig.routing_table import Routes >>> table = [ ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf), ... RoutingTableEntry({Routes.east}, 0x1, 0xf), ... RoutingTableEntry({Routes.south_west}, 0x5, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf), ... RoutingTableEntry({Routes.east}, 0x9, 0xf), ... RoutingTableEntry({Routes.south_west}, 0xe, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf), ... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb), ... ] is a functional subset of a minimised version of itself:: >>> from rig.routing_table.ordered_covering import minimise >>> other_table = minimise(table, target_length=None) >>> other_table == table False >>> table_is_subset_of(table, other_table) True But not vice-versa:: >>> table_is_subset_of(other_table, table) False Default routes are taken into account, such that the table:: >>> table = [ ... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}), ... ] is a subset of the empty table:: >>> table_is_subset_of(table, list()) True Parameters ---------- entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Ordered of lists of routing table entries to compare. Returns ------- bool True if every key matched in `entries_a` would result in an equivalent route for the packet when matched in `entries_b`. 
""" # Determine which bits we don't need to explicitly test for common_xs = get_common_xs(entries_b) # For every entry in the first table for entry in expand_entries(entries_a, ignore_xs=common_xs): # Look at every entry in the second table for other_entry in entries_b: # If the first entry matches the second if other_entry.mask & entry.key == other_entry.key: if other_entry.route == entry.route: # If the route is the same then we move on to the next # entry in the first table. break else: # Otherwise we return false as the tables are different return False else: # If we didn't break out of the loop then the entry from the first # table never matched an entry in the second table. If the entry # from the first table could not be default routed we return False # as the tables cannot be equivalent. default_routed = False if len(entry.route) == 1 and len(entry.sources) == 1: source = next(iter(entry.sources)) sink = next(iter(entry.route)) if (source is not None and sink.is_link and source is sink.opposite): default_routed = True if not default_routed: return False return True
[ "def", "table_is_subset_of", "(", "entries_a", ",", "entries_b", ")", ":", "# Determine which bits we don't need to explicitly test for", "common_xs", "=", "get_common_xs", "(", "entries_b", ")", "# For every entry in the first table", "for", "entry", "in", "expand_entries", ...
Check that every key matched by every entry in one table results in the same route when checked against the other table. For example, the table:: >>> from rig.routing_table import Routes >>> table = [ ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf), ... RoutingTableEntry({Routes.east}, 0x1, 0xf), ... RoutingTableEntry({Routes.south_west}, 0x5, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf), ... RoutingTableEntry({Routes.east}, 0x9, 0xf), ... RoutingTableEntry({Routes.south_west}, 0xe, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf), ... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb), ... ] is a functional subset of a minimised version of itself:: >>> from rig.routing_table.ordered_covering import minimise >>> other_table = minimise(table, target_length=None) >>> other_table == table False >>> table_is_subset_of(table, other_table) True But not vice-versa:: >>> table_is_subset_of(other_table, table) False Default routes are taken into account, such that the table:: >>> table = [ ... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}), ... ] is a subset of the empty table:: >>> table_is_subset_of(table, list()) True Parameters ---------- entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Ordered of lists of routing table entries to compare. Returns ------- bool True if every key matched in `entries_a` would result in an equivalent route for the packet when matched in `entries_b`.
[ "Check", "that", "every", "key", "matched", "by", "every", "entry", "in", "one", "table", "results", "in", "the", "same", "route", "when", "checked", "against", "the", "other", "table", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L108-L198
train
50,492
project-rig/rig
rig/routing_table/utils.py
expand_entry
def expand_entry(entry, ignore_xs=0x0): """Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and ``1``\ s. The following will expand any Xs in bits ``1..3``\ :: >>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100) >>> list(expand_entry(entry, 0xfffffff1)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... ] True Parameters ---------- entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar The entry to expand. ignore_xs : int Bit-mask of Xs which should not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entry but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s. """ # Get all the Xs in the entry that are not ignored xs = (~entry.key & ~entry.mask) & ~ignore_xs # Find the most significant X for bit in (1 << i for i in range(31, -1, -1)): if bit & xs: # Yield all the entries with this bit set as 0 entry_0 = RoutingTableEntry(entry.route, entry.key, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_0, ignore_xs): yield new_entry # And yield all the entries with this bit set as 1 entry_1 = RoutingTableEntry(entry.route, entry.key | bit, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_1, ignore_xs): yield new_entry # Stop looking for Xs break else: # If there are no Xs then yield the entry we were given. yield entry
python
def expand_entry(entry, ignore_xs=0x0): """Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and ``1``\ s. The following will expand any Xs in bits ``1..3``\ :: >>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100) >>> list(expand_entry(entry, 0xfffffff1)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... ] True Parameters ---------- entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar The entry to expand. ignore_xs : int Bit-mask of Xs which should not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entry but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s. """ # Get all the Xs in the entry that are not ignored xs = (~entry.key & ~entry.mask) & ~ignore_xs # Find the most significant X for bit in (1 << i for i in range(31, -1, -1)): if bit & xs: # Yield all the entries with this bit set as 0 entry_0 = RoutingTableEntry(entry.route, entry.key, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_0, ignore_xs): yield new_entry # And yield all the entries with this bit set as 1 entry_1 = RoutingTableEntry(entry.route, entry.key | bit, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_1, ignore_xs): yield new_entry # Stop looking for Xs break else: # If there are no Xs then yield the entry we were given. yield entry
[ "def", "expand_entry", "(", "entry", ",", "ignore_xs", "=", "0x0", ")", ":", "# Get all the Xs in the entry that are not ignored", "xs", "=", "(", "~", "entry", ".", "key", "&", "~", "entry", ".", "mask", ")", "&", "~", "ignore_xs", "# Find the most significant ...
Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and ``1``\ s. The following will expand any Xs in bits ``1..3``\ :: >>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100) >>> list(expand_entry(entry, 0xfffffff1)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... ] True Parameters ---------- entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar The entry to expand. ignore_xs : int Bit-mask of Xs which should not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entry but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s.
[ "Turn", "all", "Xs", "which", "are", "not", "marked", "in", "ignore_xs", "into", "0", "\\", "s", "and", "1", "\\", "s", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L234-L282
train
50,493
project-rig/rig
rig/routing_table/utils.py
expand_entries
def expand_entries(entries, ignore_xs=None): """Turn all Xs which are not ignored in all entries into ``0`` s and ``1`` s. For example:: >>> from rig.routing_table import RoutingTableEntry >>> entries = [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X ... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X ... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X ... ] True Note that the ``X`` in the LSB was retained because it is common to all entries. Any duplicated entries will be removed (in this case the first and second entries will both match ``0000``, so when the second entry is expanded only one entry is retained):: >>> from rig.routing_table import Routes >>> entries = [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S ... ] True .. warning:: It is assumed that the input routing table is orthogonal (i.e., there are no two entries which would match the same key). If this is not the case, any entries which are covered (i.e. unreachable) in the input table will be omitted and a warning produced. As a result, all output routing tables are guaranteed to be orthogonal. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar The entries to expand. Other Parameters ---------------- ignore_xs : int Mask of bits in which Xs should not be expanded. 
If None (the default) then Xs which are common to all entries will not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entries but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s. """ # Find the common Xs for the entries if ignore_xs is None: ignore_xs = get_common_xs(entries) # Keep a track of keys that we've seen seen_keys = set({}) # Expand each entry in turn for entry in entries: for new_entry in expand_entry(entry, ignore_xs): if new_entry.key in seen_keys: # We've already used this key, warn that the table is # over-complete. warnings.warn("Table is not orthogonal: Key {:#010x} matches " "multiple entries.".format(new_entry.key)) else: # Mark the key as seen and yield the new entry seen_keys.add(new_entry.key) yield new_entry
python
def expand_entries(entries, ignore_xs=None): """Turn all Xs which are not ignored in all entries into ``0`` s and ``1`` s. For example:: >>> from rig.routing_table import RoutingTableEntry >>> entries = [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X ... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X ... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X ... ] True Note that the ``X`` in the LSB was retained because it is common to all entries. Any duplicated entries will be removed (in this case the first and second entries will both match ``0000``, so when the second entry is expanded only one entry is retained):: >>> from rig.routing_table import Routes >>> entries = [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S ... ] True .. warning:: It is assumed that the input routing table is orthogonal (i.e., there are no two entries which would match the same key). If this is not the case, any entries which are covered (i.e. unreachable) in the input table will be omitted and a warning produced. As a result, all output routing tables are guaranteed to be orthogonal. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar The entries to expand. Other Parameters ---------------- ignore_xs : int Mask of bits in which Xs should not be expanded. 
If None (the default) then Xs which are common to all entries will not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entries but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s. """ # Find the common Xs for the entries if ignore_xs is None: ignore_xs = get_common_xs(entries) # Keep a track of keys that we've seen seen_keys = set({}) # Expand each entry in turn for entry in entries: for new_entry in expand_entry(entry, ignore_xs): if new_entry.key in seen_keys: # We've already used this key, warn that the table is # over-complete. warnings.warn("Table is not orthogonal: Key {:#010x} matches " "multiple entries.".format(new_entry.key)) else: # Mark the key as seen and yield the new entry seen_keys.add(new_entry.key) yield new_entry
[ "def", "expand_entries", "(", "entries", ",", "ignore_xs", "=", "None", ")", ":", "# Find the common Xs for the entries", "if", "ignore_xs", "is", "None", ":", "ignore_xs", "=", "get_common_xs", "(", "entries", ")", "# Keep a track of keys that we've seen", "seen_keys",...
Turn all Xs which are not ignored in all entries into ``0`` s and ``1`` s. For example:: >>> from rig.routing_table import RoutingTableEntry >>> entries = [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X ... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X ... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X ... ] True Note that the ``X`` in the LSB was retained because it is common to all entries. Any duplicated entries will be removed (in this case the first and second entries will both match ``0000``, so when the second entry is expanded only one entry is retained):: >>> from rig.routing_table import Routes >>> entries = [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S ... ] >>> list(expand_entries(entries)) == [ ... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N ... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S ... ] True .. warning:: It is assumed that the input routing table is orthogonal (i.e., there are no two entries which would match the same key). If this is not the case, any entries which are covered (i.e. unreachable) in the input table will be omitted and a warning produced. As a result, all output routing tables are guaranteed to be orthogonal. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar The entries to expand. Other Parameters ---------------- ignore_xs : int Mask of bits in which Xs should not be expanded. If None (the default) then Xs which are common to all entries will not be expanded. 
Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entries but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s.
[ "Turn", "all", "Xs", "which", "are", "not", "ignored", "in", "all", "entries", "into", "0", "s", "and", "1", "s", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L285-L366
train
50,494
project-rig/rig
rig/routing_table/utils.py
get_common_xs
def get_common_xs(entries):
    """Return a mask of where there are Xs in all routing table entries.

    For example ``01XX`` and ``XX1X`` have common Xs in the LSB only, for
    this input this method would return ``0b0001``::

        >>> from rig.routing_table import RoutingTableEntry
        >>> entries = [
        ...     RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100),  # 01XX
        ...     RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010),  # XX1X
        ... ]
        >>> print("{:#06b}".format(get_common_xs(entries)))
        0b0001
    """
    # A bit is an X in a single entry iff it is 0 in both that entry's key
    # and mask; accumulate every bit that is ever set in either field.
    seen_bits = 0x00000000
    for entry in entries:
        seen_bits |= entry.key | entry.mask

    # Bits never set in any key or mask are Xs common to all entries.
    return ~seen_bits & 0xffffffff
python
def get_common_xs(entries): """Return a mask of where there are Xs in all routing table entries. For example ``01XX`` and ``XX1X`` have common Xs in the LSB only, for this input this method would return ``0b0001``:: >>> from rig.routing_table import RoutingTableEntry >>> entries = [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X ... ] >>> print("{:#06b}".format(get_common_xs(entries))) 0b0001 """ # Determine where there are never 1s in the key and mask key = 0x00000000 mask = 0x00000000 for entry in entries: key |= entry.key mask |= entry.mask # Where there are never 1s in the key or the mask there are Xs which are # common to all entries. return (~(key | mask)) & 0xffffffff
[ "def", "get_common_xs", "(", "entries", ")", ":", "# Determine where there are never 1s in the key and mask", "key", "=", "0x00000000", "mask", "=", "0x00000000", "for", "entry", "in", "entries", ":", "key", "|=", "entry", ".", "key", "mask", "|=", "entry", ".", ...
Return a mask of where there are Xs in all routing table entries. For example ``01XX`` and ``XX1X`` have common Xs in the LSB only, for this input this method would return ``0b0001``:: >>> from rig.routing_table import RoutingTableEntry >>> entries = [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX ... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X ... ] >>> print("{:#06b}".format(get_common_xs(entries))) 0b0001
[ "Return", "a", "mask", "of", "where", "there", "are", "Xs", "in", "all", "routing", "table", "entries", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L369-L393
train
50,495
project-rig/rig
rig/place_and_route/allocate/utils.py
slices_overlap
def slices_overlap(slice_a, slice_b):
    """Test if the ranges covered by a pair of slices overlap."""
    # Only contiguous (step-less) slices are supported.
    assert slice_a.step is None
    assert slice_b.step is None

    # Two half-open ranges intersect iff the later of the two starts falls
    # strictly before the earlier of the two stops.
    latest_start = max(slice_a.start, slice_b.start)
    earliest_stop = min(slice_a.stop, slice_b.stop)
    return latest_start < earliest_stop
python
def slices_overlap(slice_a, slice_b): """Test if the ranges covered by a pair of slices overlap.""" assert slice_a.step is None assert slice_b.step is None return max(slice_a.start, slice_b.start) \ < min(slice_a.stop, slice_b.stop)
[ "def", "slices_overlap", "(", "slice_a", ",", "slice_b", ")", ":", "assert", "slice_a", ".", "step", "is", "None", "assert", "slice_b", ".", "step", "is", "None", "return", "max", "(", "slice_a", ".", "start", ",", "slice_b", ".", "start", ")", "<", "m...
Test if the ranges covered by a pair of slices overlap.
[ "Test", "if", "the", "ranges", "covered", "by", "a", "pair", "of", "slices", "overlap", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/allocate/utils.py#L4-L10
train
50,496
project-rig/rig
rig/geometry.py
concentric_hexagons
def concentric_hexagons(radius, start=(0, 0)):
    """A generator which produces coordinates of concentric rings of
    hexagons.

    Parameters
    ----------
    radius : int
        Number of layers to produce (0 is just one hexagon)
    start : (x, y)
        The coordinate of the central hexagon.
    """
    x, y = start
    yield (x, y)

    # Step directions taken, in order, while walking around a ring.
    side_steps = ((1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0))

    for ring in range(1, radius + 1):
        # Hop outward onto the next ring.
        y -= 1
        # Each of the six sides of a ring is `ring` hexagons long.
        for dx, dy in side_steps:
            for _ in range(ring):
                yield (x, y)
                x += dx
                y += dy
python
def concentric_hexagons(radius, start=(0, 0)): """A generator which produces coordinates of concentric rings of hexagons. Parameters ---------- radius : int Number of layers to produce (0 is just one hexagon) start : (x, y) The coordinate of the central hexagon. """ x, y = start yield (x, y) for r in range(1, radius + 1): # Move to the next layer y -= 1 # Walk around the hexagon of this radius for dx, dy in [(1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0)]: for _ in range(r): yield (x, y) x += dx y += dy
[ "def", "concentric_hexagons", "(", "radius", ",", "start", "=", "(", "0", ",", "0", ")", ")", ":", "x", ",", "y", "=", "start", "yield", "(", "x", ",", "y", ")", "for", "r", "in", "range", "(", "1", ",", "radius", "+", "1", ")", ":", "# Move ...
A generator which produces coordinates of concentric rings of hexagons. Parameters ---------- radius : int Number of layers to produce (0 is just one hexagon) start : (x, y) The coordinate of the central hexagon.
[ "A", "generator", "which", "produces", "coordinates", "of", "concentric", "rings", "of", "hexagons", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L215-L235
train
50,497
project-rig/rig
rig/geometry.py
spinn5_eth_coords
def spinn5_eth_coords(width, height, root_x=0, root_y=0):
    """Generate a list of board coordinates with Ethernet connectivity in a
    SpiNNaker machine.

    Specifically, generates the coordinates for the Ethernet connected chips
    of SpiNN-5 boards arranged in a standard torus topology.

    .. warning::
        In general, applications should use
        :py:class:`rig.machine_control.MachineController.get_system_info` and
        :py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
        to gather the coordinates of Ethernet connected chips which are
        actually functioning. For example::

            >> from rig.machine_control import MachineController
            >> mc = MachineController("my-machine")
            >> si = mc.get_system_info()
            >> print(list(si.ethernet_connected_chips()))
            [((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]

    Parameters
    ----------
    width, height : int
        Width and height of the system in chips.
    root_x, root_y : int
        The coordinates of the root chip (i.e. the chip used to boot the
        machine), e.g. from
        :py:attr:`rig.machine_control.MachineController.root_chip`.
    """
    # In oddly-shaped machines where chip (0, 0) does not exist, we must
    # offset the coordinates returned in accordance with the root chip's
    # location. The board pattern repeats every 12 chips, so only the
    # position within a 12x12 tile matters.
    # Bug fix: the second reduction previously re-applied `root_x %= 12`
    # instead of reducing root_y.
    root_x %= 12
    root_y %= 12

    # Internally, work with the width and height rounded up to the next
    # multiple of 12 (one SpiNN-5 board tile).
    w = ((width + 11) // 12) * 12
    h = ((height + 11) // 12) * 12

    for x in range(0, w, 12):
        for y in range(0, h, 12):
            # Each 12x12 tile holds three boards whose Ethernet chips sit at
            # these fixed offsets.
            for dx, dy in ((0, 0), (4, 8), (8, 4)):
                nx = (x + dx + root_x) % w
                ny = (y + dy + root_y) % h
                # Skip points which are outside the range available
                if nx < width and ny < height:
                    yield (nx, ny)
python
def spinn5_eth_coords(width, height, root_x=0, root_y=0):
    """Generate a list of board coordinates with Ethernet connectivity in a
    SpiNNaker machine.

    Specifically, generates the coordinates for the Ethernet connected chips
    of SpiNN-5 boards arranged in a standard torus topology.

    .. warning::
        In general, applications should use
        :py:class:`rig.machine_control.MachineController.get_system_info` and
        :py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
        to gather the coordinates of Ethernet connected chips which are
        actually functioning. For example::

            >> from rig.machine_control import MachineController
            >> mc = MachineController("my-machine")
            >> si = mc.get_system_info()
            >> print(list(si.ethernet_connected_chips()))
            [((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]

    Parameters
    ----------
    width, height : int
        Width and height of the system in chips.
    root_x, root_y : int
        The coordinates of the root chip (i.e. the chip used to boot the
        machine), e.g. from
        :py:attr:`rig.machine_control.MachineController.root_chip`.
    """
    # In oddly-shaped machines where chip (0, 0) does not exist, we must
    # offset the coordinates returned in accordance with the root chip's
    # location. The board pattern repeats every 12 chips, so only the
    # position within a 12x12 tile matters.
    # Bug fix: the second reduction previously re-applied `root_x %= 12`
    # instead of reducing root_y.
    root_x %= 12
    root_y %= 12

    # Internally, work with the width and height rounded up to the next
    # multiple of 12 (one SpiNN-5 board tile).
    w = ((width + 11) // 12) * 12
    h = ((height + 11) // 12) * 12

    for x in range(0, w, 12):
        for y in range(0, h, 12):
            # Each 12x12 tile holds three boards whose Ethernet chips sit at
            # these fixed offsets.
            for dx, dy in ((0, 0), (4, 8), (8, 4)):
                nx = (x + dx + root_x) % w
                ny = (y + dy + root_y) % h
                # Skip points which are outside the range available
                if nx < width and ny < height:
                    yield (nx, ny)
[ "def", "spinn5_eth_coords", "(", "width", ",", "height", ",", "root_x", "=", "0", ",", "root_y", "=", "0", ")", ":", "# In oddly-shaped machines where chip (0, 0) does not exist, we must offset", "# the coordinates returned in accordance with the root chip's location.", "root_x",...
Generate a list of board coordinates with Ethernet connectivity in a SpiNNaker machine. Specifically, generates the coordinates for the Ethernet connected chips of SpiNN-5 boards arranged in a standard torus topology. .. warning:: In general, applications should use :py:class:`rig.machine_control.MachineController.get_system_info` and :py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips` to gather the coordinates of Ethernet connected chips which are actually functioning. For example:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(list(si.ethernet_connected_chips())) [((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")] Parameters ---------- width, height : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`.
[ "Generate", "a", "list", "of", "board", "coordinates", "with", "Ethernet", "connectivity", "in", "a", "SpiNNaker", "machine", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L281-L328
train
50,498
project-rig/rig
rig/geometry.py
spinn5_local_eth_coord
def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0):
    """Get the coordinates of a chip's local ethernet connected chip.

    Returns the coordinates of the ethernet connected chip on the same board
    as the supplied chip.

    .. note::
        This function assumes the system is constructed from SpiNN-5 boards

    .. warning::
        In general, applications should interrogate the machine to determine
        which Ethernet connected chip is considered 'local' to a particular
        SpiNNaker chip, e.g. using
        :py:class:`rig.machine_control.MachineController.get_system_info`::

            >> from rig.machine_control import MachineController
            >> mc = MachineController("my-machine")
            >> si = mc.get_system_info()
            >> print(si[(3, 2)].local_ethernet_chip)
            (0, 0)

        :py:func:`.spinn5_local_eth_coord` will always produce the
        coordinates of the Ethernet-connected SpiNNaker chip on the same
        SpiNN-5 board as the supplied chip. In future versions of the
        low-level system software, some other method of choosing local
        Ethernet connected chips may be used.

    Parameters
    ----------
    x, y : int
        Chip whose coordinates are of interest.
    w, h : int
        Width and height of the system in chips.
    root_x, root_y : int
        The coordinates of the root chip (i.e. the chip used to boot the
        machine), e.g. from
        :py:attr:`rig.machine_control.MachineController.root_chip`.
    """
    # Look up the (dx, dy) offset to this chip's board-local Ethernet chip,
    # indexed by the chip's position within the repeating 12x12 board tile
    # (relative to the root chip).
    dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]

    # Apply the offset, wrapping around the torus edges.
    eth_x = (x + int(dx)) % w
    eth_y = (y + int(dy)) % h
    return eth_x, eth_y
python
def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0): """Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`. """ dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12] return ((x + int(dx)) % w), ((y + int(dy)) % h)
[ "def", "spinn5_local_eth_coord", "(", "x", ",", "y", ",", "w", ",", "h", ",", "root_x", "=", "0", ",", "root_y", "=", "0", ")", ":", "dx", ",", "dy", "=", "SPINN5_ETH_OFFSET", "[", "(", "y", "-", "root_y", ")", "%", "12", "]", "[", "(", "x", ...
Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`.
[ "Get", "the", "coordinates", "of", "a", "chip", "s", "local", "ethernet", "connected", "chip", "." ]
3a3e053d3214899b6d68758685835de0afd5542b
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L331-L371
train
50,499